diff --git a/.github/actions/free-disk-space/action.yml b/.github/actions/free-disk-space/action.yml new file mode 100644 index 0000000000..04071710e2 --- /dev/null +++ b/.github/actions/free-disk-space/action.yml @@ -0,0 +1,43 @@ +name: Free Disk Space +description: Reclaims disk space on GitHub-hosted runners + +runs: + using: "composite" + steps: + - shell: bash + run: | + echo "=== Disk usage before cleanup ===" + df -h + + echo "=== Remove Android SDK ===" + sudo rm -rf /usr/local/lib/android/sdk + + echo "=== Apt cleanup ===" + sudo apt-get update + if command -v eatmydata >/dev/null 2>&1; then + SUDO_EAT="sudo eatmydata" + else + echo "eatmydata not found – falling back to plain apt-get" + SUDO_EAT="sudo" + fi + $SUDO_EAT apt-get purge --auto-remove -y \ + azure-cli firefox google-chrome-stable 'llvm-*' microsoft-edge-stable powershell 'temurin-*' 'zulu-*' || true + + sudo apt-get autoclean + sudo apt-get autoremove -y + sudo apt-get clean + + echo "=== Docker cleanup ===" + sudo docker system prune -af --volumes + + echo "=== Disable and remove swap ===" + sudo swapon --show + sudo swapoff -a || true # ignore error when no swap is present + sudo rm -f /mnt/swapfile + + echo "=== Remove potential leftover PV setup image ===" + sudo rm -f /mnt/tmp-pv.img + + echo "=== Disk usage after cleanup ===" + df -h + diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 56e62e1a56..00cfdbd30b 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -295,6 +295,7 @@ jobs: OVN_GATEWAY_MODE: "${{ matrix.gateway-mode }}" OVN_MULTICAST_ENABLE: "false" steps: + - name: Check out code into the Go module directory - from Master branch uses: actions/checkout@v4 with: @@ -317,27 +318,11 @@ jobs: echo "GOPATH=$GOPATH" >> $GITHUB_ENV echo "$GOPATH/bin" >> $GITHUB_PATH + - name: Check out code into the Go module directory - from PR branch + uses: actions/checkout@v4 + - name: Free up disk space - run: | - df -h - sudo rm -rf 
/usr/local/lib/android/sdk - sudo apt-get update - sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli firefox \ - google-chrome-stable \ - llvm-* microsoft-edge-stable \ - powershell temurin-* zulu-* - # clean unused packages - sudo apt-get autoclean - sudo apt-get autoremove -y - # clean apt cache - sudo apt-get clean - sudo docker system prune -af --volumes - df -h - sudo swapon --show - sudo swapoff -a - sudo rm -f /mnt/swapfile - df -h + uses: ./.github/actions/free-disk-space - name: Download test-image-master uses: actions/download-artifact@v4 @@ -386,9 +371,6 @@ jobs: run: | docker load --input ${CI_IMAGE_PR_TAR} && rm -rf ${CI_IMAGE_PR_TAR} - - name: Check out code into the Go module directory - from PR branch - uses: actions/checkout@v4 - - name: Runner Diagnostics if: always() uses: ./.github/actions/diagnostics @@ -462,7 +444,7 @@ jobs: - {"target": "control-plane", "ha": "noHA", "gateway-mode": "local", "ipfamily": "ipv6", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "control-plane", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "2br", "ic": "ic-single-node-zones"} - {"target": "control-plane", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv6", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "2br", "ic": "ic-single-node-zones", "cni-mode": "unprivileged"} - - {"target": "multi-homing", "ha": "noHA", "gateway-mode": "local", "ipfamily": "ipv4", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-disabled"} + - {"target": "multi-homing", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "multi-homing-helm", "ha": "HA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-disabled", 
"network-segmentation": "enable-network-segmentation"} - {"target": "node-ip-mac-migration", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv6", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-disabled"} - {"target": "node-ip-mac-migration", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} @@ -483,6 +465,7 @@ jobs: - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv6", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "bgp", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation", "dns-name-resolver": "enable-dns-name-resolver"} - {"target": "bgp", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation", "dns-name-resolver": "enable-dns-name-resolver"} + - {"target": "bgp", "ha": "noHA", "gateway-mode": "local", "ipfamily": "ipv6", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation"} - {"target": "bgp-loose-isolation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation", "advertised-udn-isolation-mode": "loose"} - {"target": "traffic-flow-test-only","ha": "noHA", "gateway-mode": "shared", "ipfamily": 
"ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "traffic-flow-tests": "1-24", "network-segmentation": "enable-network-segmentation"} - {"target": "tools", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "network-segmentation": "enable-network-segmentation"} @@ -518,6 +501,12 @@ jobs: ADVERTISED_UDN_ISOLATION_MODE: "${{ matrix.advertised-udn-isolation-mode }}" OVN_UNPRIVILEGED_MODE: "${{ matrix.cni-mode == 'unprivileged' }}" steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v4 + + - name: Runner Diagnostics + if: always() + uses: ./.github/actions/diagnostics - name: Install VRF kernel module run: | @@ -527,26 +516,7 @@ jobs: sudo modprobe vrf - name: Free up disk space - run: | - df -h - sudo rm -rf /usr/local/lib/android/sdk - sudo apt-get update - sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli firefox \ - google-chrome-stable \ - llvm-* microsoft-edge-stable \ - powershell temurin-* zulu-* - # clean unused packages - sudo apt-get autoclean - sudo apt-get autoremove -y - # clean apt cache - sudo apt-get clean - sudo docker system prune -af --volumes - df -h - sudo swapon --show - sudo swapoff -a - sudo rm -f /mnt/swapfile - df -h + uses: ./.github/actions/free-disk-space - name: Setup /mnt/runner directory run: | @@ -566,9 +536,6 @@ jobs: sudo systemctl start docker docker.socket docker system info - - name: Check out code into the Go module directory - uses: actions/checkout@v4 - - name: Set up Go uses: actions/setup-go@v5 with: @@ -741,9 +708,14 @@ jobs: KIND_NUM_ZONES: "${{ matrix.num-zones }}" KIND_NUM_NODES_PER_ZONE: "${{ matrix.num-nodes-per-zone }}" steps: + - name: Check out code into the Go module directory uses: actions/checkout@v4 + - name: Runner Diagnostics + if: always() + uses: ./.github/actions/diagnostics + - name: Set up Go uses: 
actions/setup-go@v5 with: @@ -762,26 +734,7 @@ jobs: echo "$GOPATH/bin" >> $GITHUB_PATH - name: Free up disk space - run: | - df -h - sudo rm -rf /usr/local/lib/android/sdk - sudo apt-get update - sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli firefox \ - google-chrome-stable \ - llvm-* microsoft-edge-stable \ - powershell temurin-* zulu-* - # clean unused packages - sudo apt-get autoclean - sudo apt-get autoremove -y - # clean apt cache - sudo apt-get clean - sudo docker system prune -af --volumes - df -h - sudo swapon --show - sudo swapoff -a - sudo rm -f /mnt/swapfile - df -h + uses: ./.github/actions/free-disk-space - name: Disable ufw # For IPv6 and Dualstack, ufw (Uncomplicated Firewall) should be disabled. diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 5c530c5b86..85b4f69ca5 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -4,9 +4,9 @@ The current Maintainers Group for the ovn-kubernetes Project consists of: | ---- | -------- | ---------------- | | [Girish Moodalbail](https://github.com/girishmg) | NVIDIA | All things ovnkube | | [Jaime Caamaño Ruiz](https://github.com/jcaamano) | Red Hat | All things ovnkube | -| [Nadia Pinaeva](https://github.com/npinaeva) | Red Hat | All things ovnkube | +| [Nadia Pinaeva](https://github.com/npinaeva) | NVIDIA | All things ovnkube | | [Surya Seetharaman](https://github.com/tssurya) | Red Hat | All things ovnkube | -| [Tim Rozet](https://github.com/trozet) | Red Hat | All things ovnkube | +| [Tim Rozet](https://github.com/trozet) | NVIDIA | All things ovnkube | See [CONTRIBUTING.md](./CONTRIBUTING.md) for general contribution guidelines. See [GOVERNANCE.md](./GOVERNANCE.md) for governance guidelines and maintainer responsibilities. 
diff --git a/contrib/kind-common b/contrib/kind-common index ec7764b616..d598f56467 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -440,7 +440,7 @@ install_cert_manager() { install_kubevirt_ipam_controller() { echo "Installing KubeVirt IPAM controller manager ..." - manifest="https://github.com/kubevirt/ipam-extensions/releases/download/v0.3.0/install.yaml" + manifest="https://github.com/kubevirt/ipam-extensions/releases/download/v0.3.1/install.yaml" run_kubectl apply -f "$manifest" kubectl wait -n kubevirt-ipam-controller-system deployment kubevirt-ipam-controller-manager --for condition=Available --timeout 2m } diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index 3b41e7103c..1e0661f501 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -418,6 +418,19 @@ wait_for_event() { done } +# wait_ovnkube_controller_with_node_done - Wait for ovnkube-controller-with-node process to complete +# Checks if the ovnkube-controller-with-node process is running by looking for its PID file. +# If the PID file exists, waits for that process to finish before continuing. +# If the PID file doesnt exist, it means the process has already exited. +wait_ovnkube_controller_with_node_done() { + local pid_file=${OVN_RUNDIR}/ovnkube-controller-with-node.pid + if [[ -f ${pid_file} ]]; then + echo "info: waiting on ovnkube-controller-with-node process to end" + wait $(cat $pid_file) + echo "info: done waiting for ovn-controller-with-node to end" + fi +} + # The ovnkube-db kubernetes service must be populated with OVN DB service endpoints # before various OVN K8s containers can come up. This functions checks for that. 
# If OVN dbs are configured to listen only on unix sockets, then there will not be @@ -492,6 +505,36 @@ ovs_ready() { return 0 } +# get_bridge_name_for_physnet - Extract OVS bridge name for a given OVN physical network +# Takes an OVN network name for physical networks (physnet) and returns the corresponding +# OVS bridge name from the ovn-bridge-mappings configuration. +# Return empty string if not found. +get_bridge_name_for_physnet() { + local physnet="$1" + local mappings + mappings=$(ovs-vsctl --if-exists get open_vswitch . external_ids:ovn-bridge-mappings) + # Extract bridge name after physnet: and before next comma (or end) + # regex matches zero or more non-comma characters + # cut on colon and return field number 2 + echo "$mappings" | tr -d "\"" | grep -o "$physnet:[^,]*" | cut -d: -f2 +} + +# Adds drop flows for GARPs on patch port to br-int for specified bridge. +add_garp_drop_flow() { + local bridge="$1" + local cookie="0x0305" + local priority="498" + # if bridge exists, and the patch port is created, we expect to add at least one flow to a patch port ending in to-br-int. + # FIXME: can we generate the exact name. Its possible we add these flows to the incorrect port when selecting on substring + for port_name in $(ovs-vsctl list-ports $bridge); do + if [[ "$port_name" == *to-br-int ]]; then + local of_port=$(ovs-vsctl get interface $port_name ofport) + ovs-ofctl add-flow $bridge "cookie=$cookie,table=0,priority=$priority,in_port=$of_port,arp,arp_op=1,actions=drop" > /dev/null + break + fi + done +} + # Verify that the process is running either by checking for the PID in `ps` output # or by using `ovs-appctl` utility for the processes that support it. 
# $1 is the name of the process @@ -1732,7 +1775,10 @@ ovnkube-controller() { } ovnkube-controller-with-node() { - trap 'kill $(jobs -p) ; rm -f /etc/cni/net.d/10-ovn-kubernetes.conf ; exit 0' TERM + # send sig term to background job (ovnkube-node process), remove CNI conf and resume background job until it ends. + # currently we the process to background, therefore wait until that process removes its pid file on exit. + # if the pid file doesnt exist, we exit immediately. + trap 'kill $(jobs -p) ; rm -f /etc/cni/net.d/10-ovn-kubernetes.conf ; wait_ovnkube_controller_with_node_done; exit 0' TERM check_ovn_daemonset_version "1.1.0" rm -f ${OVN_RUNDIR}/ovnkube-controller-with-node.pid @@ -1757,6 +1803,23 @@ ovnkube-controller-with-node() { wait_for_event process_ready ovn-controller fi + # start temp work around + # remove when https://issues.redhat.com/browse/FDP-1537 is avilable + if [[ ${ovnkube_node_mode} == "full" && ${ovn_enable_interconnect} == "true" && ${ovn_egressip_enable} == "true" ]]; then + echo "=============== ovnkube-controller-with-node - (add GARP drop flows if external bridge exists)" + # bridge may not yet exist + local bridge_name="$(get_bridge_name_for_physnet 'physnet')" + if [[ "$bridge_name" != "" ]]; then + echo "=============== ovnkube-controller-with-node - found bridge mapping for physnet: $bridge_name" + # nothing to do if the external bridge isn't created. 
+ if ovs-vsctl br-exists $bridge_name; then + echo "=============== ovnkube-controller-with-node - found bridge $bridge_name" + add_garp_drop_flow "$bridge_name" + echo "=============== ovnkube-controller-with-node - (finished adding GARP drop flows)" + fi + fi + fi + ovn_routable_mtu_flag= if [[ -n "${routable_mtu}" ]]; then routable_mtu_flag="--routable-mtu ${routable_mtu}" diff --git a/docs/features/user-defined-networks/user-defined-networks.md b/docs/features/user-defined-networks/user-defined-networks.md index 1b0393bd5e..f2e7348eb7 100644 --- a/docs/features/user-defined-networks/user-defined-networks.md +++ b/docs/features/user-defined-networks/user-defined-networks.md @@ -58,11 +58,11 @@ This feature is enabled by default on all OVN-Kubernetes clusters. You don't need to do anything extra to start using this feature. There is a Feature Config option `--enable-network-segmentation` under `OVNKubernetesFeatureConfig` config that can be used to disable this -feature. However note that disabling the feature will not remove +feature. However, note that disabling the feature will not remove existing CRs in the cluster. This feature has to be enabled along with the flag for multiple-networks `--enable-multi-network` since UDNs use Network Attachment Definitions as underlying implementation detail -construct and reuse the secondary network controllers. +construct and reuse the user-defined network controllers. 
## Workflow Description @@ -339,7 +339,7 @@ default `eth0` interface of the pods: _uuid : 1278b0f4-0a14-4637-9d05-83ba9df6ec03 action : allow direction : from-lport -external_ids : {direction=Egress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:AllowHostARPSecondary:Egress", "k8s.ovn.org/name"=AllowHostARPSecondary, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} +external_ids : {direction=Egress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:AllowHostARPPrimaryUDN:Egress", "k8s.ovn.org/name"=AllowHostARPPrimaryUDN, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} label : 0 log : false match : "inport == @a8747502060113802905 && (( arp && arp.tpa == 10.244.2.2 ) || ( nd && nd.target == fd00:10:244:3::2 ))" @@ -355,7 +355,7 @@ tier : 0 _uuid : 489ae95b-ae9d-47d0-bf1d-b2477a9ed6a2 action : allow direction : to-lport -external_ids : {direction=Ingress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:AllowHostARPSecondary:Ingress", "k8s.ovn.org/name"=AllowHostARPSecondary, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} +external_ids : {direction=Ingress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:AllowHostARPPrimaryUDN:Ingress", "k8s.ovn.org/name"=AllowHostARPPrimaryUDN, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} label : 0 log : false match : "outport == @a8747502060113802905 && (( arp && arp.spa == 10.244.2.2 ) || ( nd && nd.target == fd00:10:244:3::2 ))" @@ -372,7 +372,7 @@ tier : 0 _uuid : 980be3e4-75af-45f7-bce3-3bb08ecd8b3a action : drop direction : to-lport -external_ids : {direction=Ingress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:DenySecondary:Ingress", "k8s.ovn.org/name"=DenySecondary, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} 
+external_ids : {direction=Ingress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:DenyPrimaryUDN:Ingress", "k8s.ovn.org/name"=DenyPrimaryUDN, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} label : 0 log : false match : "outport == @a8747502060113802905" @@ -388,7 +388,7 @@ tier : 0 _uuid : cca19dca-1fde-4a14-841d-7e2cce804de4 action : drop direction : from-lport -external_ids : {direction=Egress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:DenySecondary:Egress", "k8s.ovn.org/name"=DenySecondary, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} +external_ids : {direction=Egress, "k8s.ovn.org/id"="default-network-controller:UDNIsolation:DenyPrimaryUDN:Egress", "k8s.ovn.org/name"=DenyPrimaryUDN, "k8s.ovn.org/owner-controller"=default-network-controller, "k8s.ovn.org/owner-type"=UDNIsolation} label : 0 log : false match : "inport == @a8747502060113802905" diff --git a/docs/okeps/okep-5094-layer2-transit-router.md b/docs/okeps/okep-5094-layer2-transit-router.md new file mode 100644 index 0000000000..48aa64917e --- /dev/null +++ b/docs/okeps/okep-5094-layer2-transit-router.md @@ -0,0 +1,1041 @@ +# OKEP-5094: Primary UDN Layer2 topology improvements + +* Issue: [#5094](https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5094) + +## Problem Statement + +The primary UDN layer2 topology presents some problems related to VM's live migration that are being addressed by +ovn-kubernetes sending GARPs or unsolicited router advertisement and blocking some OVN router advertisements, although this fixes the issue, is not the most robust way to address the problem and adds complexity to ovn-kubernetes live migration mechanism. + +EgressIP logical router policies (LRP) for layer2 networks are applied on the gateway router, compared to the cluster router for layer3 networks. 
+To ensure proper traffic load balancing for EgressIPs with multiple IPs defined, especially for pods running on the EgressIP nodes themselves, a workaround was introduced. +This workaround configures the LRP for a pod on one of the EgressIP nodes to use the external gateway IP as one of the next hops to achieve proper load balancing. + +For the following EgressIP: + +```yaml +apiVersion: k8s.ovn.org/v1 +kind: EgressIP + metadata: + annotations: + k8s.ovn.org/egressip-mark: "50000" + name: egressip-1 +... + spec: + egressIPs: + - 172.18.0.100 + - 172.18.0.101 +... + status: + items: + - egressIP: 172.18.0.100 + node: ovn-worker + - egressIP: 172.18.0.101 + node: ovn-worker2 +``` + +The following LRP is present on ovn-worker for a local pod with IP `10.10.0.8`: + +```bash +ovn-nbctl lr-policy-list GR_cluster_udn_l2network_ovn-worker +Routing Policies + 100 ip4.src == 10.10.0.8 reroute 100.65.0.3, 172.18.0.1 pkt_mark=50000 +``` + +This policy redirects traffic the local external gateway `172.18.0.1`, or to the join IP of the second egress node (ovn-worker2) `100.65.0.3`. + +While this approach works in most cases, it has the following limitations: + +* Does not work when there's no default gateway. +* [Does not work on platforms that use `/32` per node](https://issues.redhat.com/browse/OCPBUGS-48301). +* Does not respect multiple gateways and always sends traffic to one of the gateways. + +We can make use of the [new transit router OVN topology entity](https://github.com/ovn-org/ovn/blob/c24b1aa3c724de1aa9fd2461f07e4127a6bfa190/NEWS#L42-L44) to fix these issues and changing the topology for primary UDN layer2. + +## Goals + +1. For layer2 topology advertise default gw with same IP and MAC address independently of the node where + the vm is running. +2. Keep all the layer2 topology features at current topology. +3. Eliminate the dependency on external gateway IPs from layer2 EgressIP implementation. +4. Make the new topology upgradable with minor disruption. 
+ +## Non-Goals + +1. Support non interconnect or interconnect with multiple nodes per zone. +2. Extend the layer2 topology changes to other topologies. + +## Introduction + +### Layer2 default gw discovery at VMs + +Currently at layer2 topology the virtual machine related to default gw +routing looks like the following for ipv4, where the .1 address is configured +using DHCP and pods send an ARP that is only answered by the local gateway +router with the gateway router mac. + +```mermaid +flowchart TD + classDef vmStyle fill:#0000FF,stroke:#ADD8E6,stroke-width:2px; + + subgraph node1["node1"] + GR-node1["GR-node1"] + layer2-switch["Layer2 Switch"] + subgraph VM["Virtual Machine"] + class VM vmStyle; + route["default gw -> 203.203.0.1 (0a:58:64:41:00:02)"] + end + VM -->|"ARP 203.203.0.1"|layer2-switch + end + GR-node1 <-->|"router 203.203.0.1 (0a:58:64:41:00:02)"| layer2-switch + GR-node2 <-->|"remote 100.65.0.3 (0a:58:64:41:00:03)"| layer2-switch + GR-node3 <-->|"remote 100.65.0.4 (0a:58:64:41:00:04)"| layer2-switch + + class VM vmStyle; +``` + +```bash +$ ip route +default via 203.203.0.1 dev eth0 proto dhcp metric 100 + +$ ip neigh +203.203.0.1 dev eth0 lladdr 0a:58:64:41:00:02 REACHABLE +``` + +And this is how it looks for IPv6 where the [RFC](https://datatracker.ietf.org/doc/html/rfc4861#section-4.2) dictates the default gw route +is advertised with the link local address. So every gateway router connected to the +switch will send a router advertisement after receiving the router solicitation from +the virtual machine. 
+```mermaid +flowchart TD + classDef vmStyle fill:#0000FF,stroke:#ADD8E6,stroke-width:2px; + + subgraph node1["node1"] + GR-node1["GR-node1"] + layer2-switch["Layer2 Switch"] + subgraph VM["Virtual Machine"] + class VM vmStyle; + route["default gw fe80::858:64ff:fe41:2 fe80::858:64ff:fe41:3 fe80::858:64ff:fe41:4"] + end + layer2-switch--> VM + + end + GR-node1 -->|"RA fe80::858:64ff:fe41:2"| layer2-switch + GR-node2 -->|"RA fe80::858:64ff:fe41:3"| layer2-switch + GR-node3 -->|"RA fe80::858:64ff:fe41:4"| layer2-switch + + class VM vmStyle; +``` + +```bash +$ ip -6 route +default proto ra metric 100 pref low + nexthop via fe80::858:64ff:fe41:2 dev eth0 weight 1 + nexthop via fe80::858:64ff:fe41:3 dev eth0 weight 1 + nexthop via fe80::858:64ff:fe41:4 dev eth0 weight 1 + +$ ip neigh +fe80::858:64ff:fe41:3 dev eth0 lladdr 0a:58:64:41:00:03 router STALE +fe80::858:64ff:fe41:4 dev eth0 lladdr 0a:58:64:41:00:04 router STALE +fe80::858:64ff:fe41:2 dev eth0 lladdr 0a:58:64:41:00:02 router STALE +``` + +This is a view of the logical router ports connected to the switch, take +into account that the 203.203.0.1 is only "propagated" on the node where the +vm is running: +```bash +$ ovnk ovn-control-plane ovn-nbctl show GR_test12_namespace.scoped_ovn-control-plane +router 2b9a5f29-ef44-4bda-8d39-45198353013b (GR_test12_namespace.scoped_ovn-control-plane) + port rtos-test12_namespace.scoped_ovn_layer2_switch + mac: "0a:58:64:41:00:03" + ipv6-lla: "fe80::858:64ff:fe41:3" + networks: ["100.65.0.3/16", "2010:100:200::1/60", "203.203.0.1/16", "fd99::3/64"] + +$ ovnk ovn-worker ovn-nbctl show GR_test12_namespace.scoped_ovn-worker +router dbbb9301-2311-4d2f-bfec-64e1caf78b8e (GR_test12_namespace.scoped_ovn-worker) + port rtos-test12_namespace.scoped_ovn_layer2_switch + mac: "0a:58:64:41:00:02" + ipv6-lla: "fe80::858:64ff:fe41:2" + networks: ["100.65.0.2/16", "2010:100:200::1/60", "203.203.0.1/16", "fd99::2/64"] + +$ ovnk ovn-worker2 ovn-nbctl show GR_test12_namespace.scoped_ovn-worker2 
+router 148b41ca-3641-449e-897e-0d63bf395233 (GR_test12_namespace.scoped_ovn-worker2) + port rtos-test12_namespace.scoped_ovn_layer2_switch + mac: "0a:58:64:41:00:04" + ipv6-lla: "fe80::858:64ff:fe41:4" + networks: ["100.65.0.4/16", "2010:100:200::1/60", "203.203.0.1/16", "fd99::4/64"] +``` + +So the gist of it is that the default gw ip4 and ipv6 is dependent of where +the VM is running, and that has important implications. + +Also having a multipath IPv6 default gateway means that the egress traffic is load balanced between nodes, across the geneve interconnect. + +### Virtual machine live migration and default gateway + +When a virtual machine is live migrated, it is transferred from the node where it is +running to a different one, in this case it can be from node1 to node2. + +After live migration has finished and the VM is running on a different node, +the VM does *not* initiate any type of ARP or Router Solicitation to reconcile +routes since from its point of view nothing has changed. This means it's running +with the same network configuration, the consequence of that is that the +VM will continue running with its default IPv4 gateway mac address pointing to node1 and +for ipv6 it will continue to be the multipath default gw. + +One common scenario that triggers user live migrating VMs is related to +doing some kind of node maintenance where the node need to go down. The VM is +live migrated to a different node, then the node where it was original running +is shutdown and some maintenance (e.g., hardware changes) is done before starting +it up again. + +With that scenario in mind, after VM has live migrated: +- the default IPv4 default gateway mac will point to a node that is currently down +- one of default IPv6 gateway paths will be pointing to a node that is currently down. 
+ +To fix that for IPv4, ovn-kubernetes sends a GARP after live migration to +reconcile the default gw mac to the new node where the VM is running [Pull Request 4964](https://github.com/ovn-kubernetes/ovn-kubernetes/pull/4964). + +For ipv6 there are changes to do something similar by blocking external gateway +routers RAs [Pull Request 4852](https://github.com/ovn-kubernetes/ovn-kubernetes/pull/4852) and reconciling gateways with unsolicited router advertisements +[Pull Request 4847](https://github.com/ovn-kubernetes/ovn-kubernetes/pull/4847). + +Although these fixes work, they are not very robust since messages can be lost +or blocked so gateway do not get reconciled. + +This is how the topology will look after the virtual machine has being live migrated from node1 to node2 +and shutting down node1 after it. + +ipv4: +```mermaid +flowchart TD + classDef vmStyle fill:#0000FF,stroke:#ADD8E6,stroke-width:2px; + subgraph node1["node1 + DOWN"] + GR-node1["GR-node1 (0a:58:64:41:00:02)"] + end + subgraph node2 + GR-node2["GR-node2 (0a:58:64:41:00:03)"] + subgraph VM["Virtual Machine"] + + class VM vmStyle; + route["default gw -> 203.203.0.1 old node1 instead of node2 + (0a:58:64:41:00:02)"] + end + VM -->layer2-switch + end + subgraph node3 + GR-node3["GR-node3 (0a:58:64:41:00:04)"] + end + GR-node1 <-->|"remote 100.65.0.2 (0a:58:64:41:00:02)"| layer2-switch + GR-node2 <-->|"router 203.203.0.1 (0a:58:64:41:00:03)"| layer2-switch + GR-node3 <-->|"remote 100.65.0.4 (0a:58:64:41:00:04)"| layer2-switch + + class VM vmStyle; +``` + +ipv6: +```mermaid +flowchart TD + classDef vmStyle fill:#0000FF,stroke:#ADD8E6,stroke-width:2px; + + subgraph node1["node1 (DOWN)"] + + GR-node1["GR-node1 (fe80::858:64ff:fe41:2)"] + end + subgraph node2 + GR-node2["GR-node2 (fe80::858:64ff:fe41:3)"] + layer2-switch["Layer2 Switch"] + subgraph VM["Virtual Machine"] + + class VM vmStyle; + route["default gw + down(fe80::858:64ff:fe41:2) + fe80::858:64ff:fe41:3 + fe80::858:64ff:fe41:4"] + end + 
layer2-switch--> VM + end + subgraph node3 + GR-node3["GR-node3 (fe80::858:64ff:fe41:4)"] + end + + GR-node2 --> layer2-switch + GR-node3 --> layer2-switch + + class VM vmStyle; +``` + +### Layer2 topology limitations for EIP + +The current layer2 topology has some limitations with Egress IP. +When multiple IPs are assigned to an Egress IP, and a pod is local to one +of the egress nodes, only the egress path local to that node will be used. + +## User-Stories/Use-Cases + +### Story 1: seamless live migration + +As a kubevirt user, I want to live migrate a virtual machine using layer2 primary UDN, +so that TCP connections to the external network are not broken and downtime is minimum with network configuration +not being changed within the virtual machine. + +For example: User has a virtual machine serving a video conference using TCP connection and the node +where is running needs to be shut down, so user does a live migration to move to other nodes, the video +should continue with minimum downtime without changing virtual machine network configuration. + +### Story 2: EIP for layer2 limitations + +As an EIP L2 UDN user, new connections from a pod should be balanced correctly over multiple Egress IPs. + +## Proposed Solution + +The OVN community did introduce a new network topology element [**transit router**](https://www.ovn.org/support/dist-docs/ovn-nb.5.html) that allows logical routers that are shared between OVN availability zones, this make possible to use a cluster router similar to layer3 topology ovn_cluster_router for layer2 +so the logical router port that is connected to the layer2 switch will have just the .1 address and mac and ipv6 lla generated +with it. 
+ +### Ports, switches and routers topology + +This is an overview of the topology with the transit subnet used to connect the transit_router and gateway routers directly (without a new logical switch - direct router port connections using the NB.Logical_Router_Port.peer field between the GR and the new transit_router). + + +OVN routers cannot have multiple ports in the same subnet, so the trtor (transit router to gateway router) ports of the transit_router need to have the minimal possible subnet, to accommodate at least 2 IPs one per peer, from the +transit switch subnet to connect the two peers, trtor (transit_router) <-> rtotr (GR). + +#### IPv4 + +Address config +```yaml +subnet: 203.203.0.0/24 +join-subnet: 100.65.0.0/16 +transit-subnet: 100.88.0.0/16 +transit-peers-node-subnet: + node1: 100.88.0.4/31 + node2: 100.88.0.8/31 + node3: 100.88.0.6/31 +``` + +The transit per node subnet has to reserve 2 addresses for the transit router side and gateway router side. + +Let's inspect node1 transit-peers-node-subnet with the `100.88.0.4/31` subnet: +GR and transit_router peers ports should use `100.88.0.4` and `100.88.0.5`. 
+ +```mermaid +%%{init: {"nodeSpacing": 20, "rankSpacing": 100}}%% +flowchart TD + classDef nodeStyle fill:orange,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef vmStyle fill:blue,stroke:none,color:white,rx:10px,ry:10px,font-size:25px; + classDef portStyle fill:#3CB371,color:black,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef routerStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef switchStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef termStyle font-family:monospace,fill:black,stroke:none,color:white; + subgraph node1["node1"] + subgraph GR-node1 + rtotr-GR-node1["trtor-GR-node1 + 100.65.0.2/16 100.88.0.5/31 (0a:58:64:41:00:02)"] + end + subgraph VM["Virtual Machine"] + class VM vmStyle; + term["default gw + 203.203.0.1 + (0a:58:CB:CB:00:01)"] + end + end + subgraph node2 + subgraph GR-node2 + rtotr-GR-node2["rtotr-GR-node2 100.65.0.3/16 100.88.0.9/31 (0a:58:64:41:00:03)"] + end + end + subgraph node3 + subgraph GR-node3 + rtotr-GR-node3["rtotr-GR-node3 100.65.0.4/16 100.88.0.7/31 (0a:58:64:41:00:04)"] + end + end + subgraph layer2-switch + stor-transit_router["stor-transit_router + type: router"] + end + subgraph transit_router["transit_router "] + trtor-GR-node1["trtor-GR-node1 100.88.0.4/31"] + trtor-GR-node2["trtor-GR-node2 100.88.0.8/31"] + trtor-GR-node3["trtor-GR-node3 100.88.0.6/31"] + rtos-layer2-switch["rtos-layer2-switch 203.203.0.1 (0a:58:CB:CB:00:01)"] + end + rtotr-GR-node1 <--> trtor-GR-node1 + rtotr-GR-node2 <--> trtor-GR-node2 + rtotr-GR-node3 <--> trtor-GR-node3 + VM <-->layer2-switch + rtos-layer2-switch <--> stor-transit_router + + class VM vmStyle; + class rtotr-GR-node1 portStyle; + class rtotr-GR-node2 portStyle; + class rtotr-GR-node3 portStyle; + class trtor-GR-node1 portStyle; + class trtor-GR-node2 portStyle; + class trtor-GR-node3 portStyle; + class stor-transit_router portStyle; + class rtos-layer2-switch portStyle; + class GR-node1 routerStyle; + class 
GR-node2 routerStyle;
+ class GR-node3 routerStyle;
+ class transit_router routerStyle;
+ class layer2-switch switchStyle;
+ class term termStyle;
+ class node1,node2,node3 nodeStyle;
+
+```
+
+As shown by the topology, the VM default gw IP is the first address from the subnet (`203.203.0.1`) and the MAC address is computed from it,
+which makes it __independent__ of where the VM is running.
+
+#### IPv6
+
+Address config
+```yaml
+subnet: 2010:100:200::0/60
+join-subnet: fd99::/64
+transit-subnet: fd97::/64
+transit-peers-node-subnet:
+ node1: fd97::8/127
+ node2: fd97::6/127
+ node3: fd97::4/127
+```
+In the case of IPv6 there is no broadcast concept, so reserving two addresses (one bit) is enough, which means the transit-peers-node-subnet size should
+be `128 - 1 = 127`.
+
+Again, with `ipcalc` we can inspect the subnet for node1:
+```bash
+$ ipcalc fd97::8/127
+Full Network: fd97:0000:0000:0000:0000:0000:0000:0008/127
+Network: fd97::8/127
+Netmask: ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe = 127
+
+Address space: Unique Local Unicast
+HostMin: fd97::8
+HostMax: fd97::9
+Hosts/Net: 2
+```
+In this case the node1 gateway and ovn_cluster routers' peer ports should use `fd97::8` and `fd97::9`. 
+ +```mermaid +%%{init: {"nodeSpacing": 20, "rankSpacing": 100}}%% +flowchart TD + classDef nodeStyle fill:orange,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef vmStyle fill:blue,stroke:none,color:white,rx:10px,ry:10px,font-size:25px; + classDef portStyle fill:#3CB371,color:black,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef routerStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef switchStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef termStyle font-family:monospace,fill:black,stroke:none,color:white; + subgraph node1["node1"] + subgraph GR-node1 + rtotr-GR-node1["trtor-GR-node1 + fd97::9/127 fd99::4/64 (0a:58:64:41:00:02)"] + end + subgraph VM["Virtual Machine"] + class VM vmStyle; + term["default gw + fe80::858:cbff:fecb:1 + (0a:58:CB:CB:00:01)"] + end + end + subgraph node2 + subgraph GR-node2 + rtotr-GR-node2["rtotr-GR-node2 fd97::7/127 fd99::3/64 (0a:58:64:41:00:03)"] + end + end + subgraph node3 + subgraph GR-node3 + rtotr-GR-node3["rtotr-GR-node3 fd97::5/127 fd99::2/64 (0a:58:64:41:00:04)"] + end + end + subgraph layer2-switch + stor-transit_router["stor-transit_router + type: router"] + end + subgraph transit_router["transit_router "] + trtor-GR-node1["trtor-GR-node1 fd97::8/127"] + trtor-GR-node2["trtor-GR-node2 fd97::6/127"] + trtor-GR-node3["trtor-GR-node3 fd97::4/127"] + rtos-layer2-switch["rtos-layer2-switch 2010:100:200::1 (ipv6-lla: fe80::858:cbff:fecb:1) (0a:58:CB:CB:00:01)"] + end + rtotr-GR-node1 <--> trtor-GR-node1 + rtotr-GR-node2 <--> trtor-GR-node2 + rtotr-GR-node3 <--> trtor-GR-node3 + VM <-->layer2-switch + rtos-layer2-switch <--> stor-transit_router + + class VM vmStyle; + class rtotr-GR-node1 portStyle; + class rtotr-GR-node2 portStyle; + class rtotr-GR-node3 portStyle; + class trtor-GR-node1 portStyle; + class trtor-GR-node2 portStyle; + class trtor-GR-node3 portStyle; + class stor-transit_router portStyle; + class rtos-layer2-switch portStyle; + class 
GR-node1 routerStyle; + class GR-node2 routerStyle; + class GR-node3 routerStyle; + class transit_router routerStyle; + class layer2-switch switchStyle + class term termStyle; + class node1,node2,node3 nodeStyle; +``` + +According to RFC 4861/4862, routers advertise a link-local address +(`fe80::/64`). Its Interface Identifier (IID) is derived from the +router's MAC address (e.g., `fe80::858:cbff:fecb:1`). This LLA +remains stable as a default gateway, independent of the virtual +machine's hosting node. + +#### Tunnel keys for the transit switches/routers + +Every OVN switch and router has a unique tunnel key. It has to be manually allocated for all transit switches and routers, +but "regular" (non-transit) switches and routers currently use automatic allocation by the northd. +OVN has a reserved range of 2^16 for the transit switches and routers https://github.com/ovn-org/ovn/commit/969f7f54ea3868172f99119991d8d875bb8f240c +that won't be assigned by the northd for the regular switches and routers to avoid conflict. + +We already use this key for the Layer3 transit switches and Layer2 switches. +So currently we can support not more than 2^16 (65K) UDNs or to be more precise, NADs, of any topology type +(since the tunnel-key is allocated based on the network-id, which in turn is also allocated for Localnet networks). +Now we need more transit routers for the layer2 topology, but the allowed tunnel key range for all transit switches and routers stays the same. +The same problem will arise when we get to UDN interconnect, because it will use even more transit routers. + +For this enhancement we have 3 options: +1. We could just split the range into 3 equal parts, use the first part for transit switches, the second part for layer2 transit routers, +and the third part for UDN interconnect transit routers. That means we could support up to 21K UDNs (NADs) and +up to 21K UDN interconnects. If we ever hit the scale limit, we can proceed with option 4. 
For backwards-compatibility,
+the first 21K keys for transit switches/routers will be allocated in the old range, and the new keys will use the new range.
+This option doesn't allow adding more features that require tunnel keys in the future since the whole range will be already consumed.
+2. Second option is to use tunnel-keys outside the protected range. Current range is split into [0, 2^24-2^16-1] for northd
+allocation and [2^24-2^16, 2^24-1] for interconnect. Northd does sequential key allocation, which means
+(for single-node-zone) it will only use `(N + 1)*5` keys (where N is the number of networks), so the following range `[(N + 1)*5; 2^24-2^16-1]`
+will be unused, and we could start allocating transit router keys from `2^24-2^16-1` going down. This is kind of dangerous
+as it only relies on the current northd behaviour.
+3. Another option (centralized) is to add ID allocation from the transit tunnel key range. There is no way to migrate existing transit
+switches to the new tunnel key, because all nodes need to do that at the same time, and a rolling upgrade won't help here.
+Since the current limit for networkIDs is `4096` https://github.com/ovn-kubernetes/ovn-kubernetes/blob/8f6e3ee9883bb6eb2230cea5c1c138d6098c95b0/go-controller/pkg/networkmanager/api.go#L17
+we can consider [`first-interconnect-key`, `first-interconnect-key`+4095] to be reserved for the legacy transit switch keys,
+and use a new centralized transit keys allocation in the range [`first-interconnect-key`+4095, 2^24-1]. This option
+requires up to 2 new annotations on NADs, and makes the number of supported UDNs dependent on the number of UDN interconnects.
+4. Fourth approach (distributed) is to start assigning all datapath keys ourselves (for all switches and routers, currently it is done by the northd),
+then we could use the full tunnel-key range of `2^24` (16M). Every regular (non-transit) switch or router only has to use
+a unique key within its zone (that is, within the node for single-node-zone IC). 
That means that every node could
+re-use the same keys for regular datapaths (but it won't work for multi-node zones; the question is whether we need to support them).
+Currently, every Layer3 topology (the topology with the most switches/routers) needs 7 keys (node switch, cluster router, join switch,
+gw router, transit router, transit switch, ext switch), so we could reserve 20 keys per network (just to be safe).
+These keys could be derived from the network-id, and then we could support e.g. 100K UDNs (2M keys) + 1M UDN interconnects (1M keys),
+and still have 12M keys left.
+
+We have agreed that we want to avoid implementing option 4 for as long as possible, which means we need to use
+tunnel keys in the most efficient way, which means option 3 is the way to go.
+We will add error handling in case we run out of tunnel keys and document the dependency on the supported number of UDNs
+vs UDN interconnects (e.g. we can support 65K Layer3 UDNs or 32K Layer2 UDNs).
+
+#### NAT configuration
+As much as possible, everything related to conntrack should not be modified, since doing so can affect TCP connections.
+
+The only NAT rule that needs to be moved from the GR to the transit_router is the one SNATing the traffic from the network subnet that goes to the management port. This is needed because now ovn_cluster_router is the router directly connected to the layer2 switch (instead of the GR). The rest of the NAT configurations can stay unchanged on the GR. 
+
+```
+allowed_ext_ips : []
+exempted_ext_ips : []
+external_ids : {"k8s.ovn.org/network"=test12_namespace-scoped, "k8s.ovn.org/topology"=layer2}
+external_ip : "169.254.0.12"
+external_mac : []
+external_port_range : ""
+gateway_port : []
+logical_ip : "203.203.0.0/16"
+logical_port : rtos-test12_namespace.scoped_ovn_layer2_switch
+match : "eth.dst == 0a:58:cb:cb:00:02"
+options : {stateless="false"}
+priority : 0
+type : snat
+```
+
+Also, the fact that the join IP is kept at the gateway router allows maintaining all the NATing done with it at the gateway router, which is used to define the OVN load balancers that implement k8s services.
+
+#### Static Routes and Logical Router Policies
+Changing where routes and policies are configured does not affect TCP connections on upgrade, so we can move some routes and policies from the gateway router
+to the transit_router without issues. In general it will be similar to the layer3 routes and policies.
+
+These will be the routes and policies configured on the new transit_router:
+```
+IPv4 Routes
+Route Table
: + 10.96.0.1 203.203.0.2 dst-ip <-- api service + 10.96.0.10 203.203.0.2 dst-ip <-- dns service + 100.65.0.2 100.88.0.6 dst-ip <-- node1 traffic use node2 gr peer ip + 100.65.0.3 100.88.0.10 dst-ip <-- node2 traffic use node2 gr peer ip + 100.65.0.4 100.88.0.14 dst-ip <-- node3 traffic use node2 gr peer ip + 203.203.0.0/16 100.88.0.14 src-ip <-- network egress traffic goes to local gateway router peer ip +``` +``` + 1004 inport == "rtos-test12_namespace.scoped_ovn_layer2_switch" && ip4.dst == 172.18.0.3 /* test12_namespace.scoped_ovn_layer2_switch */ reroute 203.203.0.2 + 1004 inport == "rtos-test12_namespace.scoped_ovn_layer2_switch" && ip6.dst == fc00:f853:ccd:e793::3 /* test12_namespace.scoped_ovn_layer2_switch */ reroute 2010:100:200::2 + 102 (ip4.src == $a10466913729612642039 || ip4.src == $a13607449821398607916) && ip4.dst == $a3613486944346402462 allow + 102 (ip4.src == $a10466913729612642039 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816 allow pkt_mark=1008 + 102 (ip6.src == $a2718358047735721557 || ip6.src == $a13607452020421864338) && ip6.dst == $a1091196985512978262 allow pkt_mark=1008 + 102 (ip6.src == $a2718358047735721557 || ip6.src == $a13607452020421864338) && ip6.dst == $a3613484745323146040 allow + 102 ip4.src == 203.203.0.0/16 && ip4.dst == 100.64.0.0/16 allow + 102 ip4.src == 203.203.0.0/16 && ip4.dst == 203.203.0.0/16 allow + 102 ip6.src == 2010:100:200::/60 && ip6.dst == 2010:100:200::/60 allow + 102 ip6.src == 2010:100:200::/60 && ip6.dst == fd98::/64 allow + 102 pkt.mark == 42 allow +``` + +And these will be the gateway router configured routes (no policies needed): +``` +IPv4 Routes +Route Table
: + 169.254.0.0/17 169.254.0.4 dst-ip rtoe-GR_test12_namespace.scoped_ovn-control-plane + 203.203.0.0/16 100.88.0.13 dst-ip rtotr-GR_test12_namespace.scoped_ovn-control-plane <-- cluster ingress or egress reply traffic going towards the pod network via transit_router peer IP. + 0.0.0.0/0 172.18.0.1 dst-ip rtoe-GR_test12_namespace.scoped_ovn-control-plane +``` + +The logical router policies for EgressIP will be present on the transit router, using transit node peer ip as the next hop: + +``` +100 ip4.src == 10.10.0.8 reroute 100.88.0.6,100.88.0.10 pkt_mark=50000 +``` + +This approach eliminates the dependency on external IP addresses while maintaining proper load balancing for EgressIPs with multiple IP addresses. + +#### Router to router direct connection without a switch + +To connect the transit_router to the local gateway router instead of using a join switch like the layer3 topology those ports can be connected each other directly +using the `peer` field. + +This is how it looks the `trtor` port at transit_router: +``` +_uuid : f697bce1-dac7-442d-9355-e298e1735c7b +dhcp_relay : [] +enabled : [] +external_ids : {"k8s.ovn.org/network"=test12_namespace-scoped, "k8s.ovn.org/topology"=layer2} +gateway_chassis : [] +ha_chassis_group : [] +ipv6_prefix : [] +ipv6_ra_configs : {} +mac : "0a:58:64:58:00:0d" +name : trtor-GR_test12_namespace.scoped_ovn-control-plane +networks : ["100.88.0.8/31", "fd97::8/127"] +options : {requested-tnl-key="4"} +peer : rtotr-GR_test12_namespace.scoped_ovn-control-plane <------------- peer field +status : {} +``` + +And this is the `rtotr` port at GR: +``` +_uuid : f9ab92f3-478c-41dd-b845-0d8ddf4a34e5 +dhcp_relay : [] +enabled : [] +external_ids : {"k8s.ovn.org/network"=test12_namespace-scoped, "k8s.ovn.org/topology"=layer2} +gateway_chassis : [] +ha_chassis_group : [] +ipv6_prefix : [] +ipv6_ra_configs : {} +mac : "0a:58:64:41:00:04" +name : rtotr-GR_test12_namespace.scoped_ovn-control-plane +networks : ["100.65.0.4/16", "100.88.0.9/31", 
"fd97::9/127", "fd99::4/64"] +options : {gateway_mtu="1400"} +peer : trtor-GR_test12_namespace.scoped_ovn-control-plane <-------------- peer field +status : {} + +``` + +#### Transit router specifics + +For transit router to work the LRPs referencing the same gateway router peer should have a unique tunnel key and if they are remote also a +requested chassis pointing to the gateway router node. + +Also in the case we still need to support conditional SNAT, the transit router port connected to the switch needs to be configured as gateway router port. + +```mermaid +%%{init: {"nodeSpacing": 20, "rankSpacing": 100}}%% +flowchart TD + classDef nodeStyle fill:orange,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef vmStyle fill:blue,stroke:none,color:white,rx:10px,ry:10px,font-size:25px; + classDef portStyle fill:#3CB371,color:black,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef routerStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef switchStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px; + classDef termStyle font-family:monospace,fill:black,stroke:none,color:white; + subgraph node2 + subgraph GR-node2 + rtos-GR-node2["rtos-GR-node2 100.65.0.3/16 (0a:58:64:41:00:03)"] + end + subgraph layer2-switch_node2["layer2-switch"] + stor-GR-node1["stor-GR-node1 + type: remote
requested-tnl-key: 4"] + stor-GR-node2["stor-GR-node2 + type: router
requested-tnl-key: 5"] + end + end + subgraph node1["node1"] + subgraph GR-node1 + rtotr-GR-node1["trtor-GR-node1 + 100.65.0.2/16 100.88.0.5/31 (0a:58:64:41:00:02)"] + end + subgraph transit_router_node1["transit_router "] + trtor-GR-node1["trtor-GR-node1 100.88.0.4/31"] + rtos-layer2-switch["rtos-layer2-switch 203.203.0.1 (0a:58:CB:CB:00:01)"] + end + subgraph layer2-switch_node1["layer2-switch"] + stor-transit_router["stor-transit_router + type: router
requested-tnl-key: 4"] + stor-GR-node1-node2["stor-GR-node2 + type: remote
requested-tnl-key: 5"] + end + end + + + + rtotr-GR-node1 <--> trtor-GR-node1 + rtos-layer2-switch <--> stor-transit_router + stor-GR-node1 <--> stor-transit_router + stor-GR-node2 <--> rtos-GR-node2 + stor-GR-node1-node2 <--> stor-GR-node2 + + class VM vmStyle; + class rtotr-GR-node1 portStyle; + class rtos-GR-node2 portStyle; + class stor-GR-node2 portStyle; + class stor-GR-node1-node2 portStyle; + class stor-GR-node1 portStyle; + class trtor-GR-node1 portStyle; + class trtor-GR-node2 portStyle; + class stor-transit_router portStyle; + class rtos-layer2-switch portStyle; + class GR-node1 routerStyle; + class GR-node2 routerStyle; + class transit_router_node1 routerStyle; + class layer2-switch_node1 switchStyle; + class layer2-switch_node2 switchStyle; + class term termStyle; + class node1,node2 nodeStyle; +``` + +### Transit subnet conflict + +Before this change Layer2 networks using Subnet overlapping with the `transitSubnet` were allowed to be created (and would be working just fine). +Now it is not possible anymore, and we need to decide what to do in this case: +1. Before upgrading to the new topology, check that no Layer2 networks with overlapping subnet are present. +If they are, block the upgrade and inform the user to fix the issue. This option prevents the user from getting new topology +for all networks until the next upgrade. +2. Upgrade all nodes, but leave old topology for networks with overlapping subnet. Report an event/warning for the user to fix the issue. +When the network is upgraded/re-created with non-overlapping subnet, it will get the new topology. Other networks will be +properly upgraded. +3. Add a new config field for Layer2 network similar to [JoinSubnets](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/ff3001ba43bf724e477ab45167dc55d929042774/go-controller/pkg/crd/userdefinednetwork/v1/shared.go#L178) +to allow users to specify a custom transit subnet for the network, in case their Subnet overlaps with the default transit subnet. 
+
+We don't allow UDN spec updates, so the only way to use this option (without introducing UDN spec updates) is to
+re-create a UDN with the same fields + the new transit subnet and then migrate all the workloads to it.
+4. Automatically select a non-overlapping transit subnet and report it via status (it may be needed to avoid subnet overlap for Connecting UDNs).
+The transit subnet is not exposed to pods, so it may be fine to select it automatically, as opposed to the joinSubnet.
+
+Options 1 and 2 will require one more release of supporting the old topology (and in the next release we break the
+networks if they were not upgraded).
+We think that overlapping subnets is a likely scenario, and we want this upgrade to be as smooth as possible, so
+option 4 is the least disruptive.
+
+### API Details
+
+Every node will get a temporary annotation "k8s.ovn.org/layer2-topology-version: 2.0" that will be removed in the next
+ovn-k version (1.3 if the feature gets in 1.2).
+Every Layer2 and Layer3 NAD will get new annotations for tunnel keys distribution.
+
+### Implementation Details
+
+#### At ovnkube-node
+
+Create a helper that is able to derive the peer IPs from the node annotation
+for the transit peers node subnet.
+
+The layer2 controller should do the following:
+- Adapt and call the syncNodeClusterRouterPort from layer3 that creates the LRP that connects to the switch at transit_router
+- Pass transit_router as cluster_name to the gateway init functions
+- Remove from gateway.go the code that was attaching the GR to the layer2 switch
+- Add to gateway.go the code that connects the GR to the transit_router using the peer ports
+- Change egressip.go so routes are configured at the transit_router instead of the gateway router.
+- Change the ZoneInterConnect handler to be able to add the transit router remote ports to transit_router
+- In general, re-use as much Layer3 code as possible, since this makes the Layer2 topology similar to it. 
+
+#### At ovnkube-control-plane
+
+At the cluster manager, the zoneCluster controller calculates the peers node subnet
+and annotates the node.
+
+To calculate the transit router subnet for each node we have two options:
+- Derive it from the node-id (same as we do for transit switch addrs: `offset := 2 * nodeID; subnet := 100.88.0.0 + offset /31`):
+  - good:
+    - No need to maintain the allocator lifecycle
+    - It's possible to entertain the idea of not annotating the node since the source of truth is the node-id
+  - bad:
+    - Add complexity in the form of node-id to subnet derivation code
+    - Make the transit peers node subnet dependent on node-id
+- Create a new subnet allocator that will re-sync on restarts and allocate
+  the subnet with it:
+  - good:
+    - Is simpler since it's a matter of a new subnet allocator
+    - The transit peers node subnet is no longer dependent on node-id
+  - bad:
+    - Need to maintain the allocator lifecycle
+    - Use more memory
+
+We will go with the first option to avoid polluting already overloaded node annotations.
+
+### Rolling upgrade and traffic disruption
+
+OpenShift upgrades work as follows: first the ovn-k pods are upgraded while the
+workload pods are still running. We can't upgrade the topology at this point,
+because it includes moving the SNAT for the management port from the GR to the transit router, which is disruptive
+for existing connections. Some time after that the node is drained (no more workload pods are left)
+and rebooted; at this point ovn-k is restarted with no workload pods, which is the time
+when we can make the topology upgrade. 
+ +From the ovn-k side we need to figure out when is that time with no workload pods, so we will +introduce a function similar to https://github.com/ovn-kubernetes/ovn-kubernetes/pull/5416/commits/8adda434a35831c49e9bd19b86888bfb074be89f#diff-214cf3919602fd060047b8d15fd6c0ca9d3ed3d42c47fff4b181a072c182b673R306 +that will check whether a given network has already been updated and if not if it has running pods, +and only upgrade the topology when it doesn't. +This means that we will have to leave the code for the previous topology version in place for one more release, +and only cleanup afterwards. + +This means, we don't need to worry about existing connections disruption (since the topology upgrade happens +after the node reboot, so there are no running connections), but we need to make sure that new connections +created when some nodes are upgraded and some are not will work and won't be disrupted. + +#### Remote pod traffic + +The pod to remote pod traffic is not affected since the logical switch will +keep intact the pod's local and remote LSPs with its tunnel key. + +No downtime expected during upgrade + +#### Egress traffic over local node + +At normal egress without an egress IP, traffic exits through the node +hosting the pod/VM. Therefore, a partial cluster upgrade does not impact +the logical network topology. + +During the upgrade all the VMs are evicted from the node, hence, they'll be migrated to another node - +and the GR MACs will be updated via the VM migration process. + +#### Egress/Ingress traffic over remote node + +Both egress (using egressip feature) and ingress can come from a remote node, +this introduces problematic scenarios that can happen during an upgrade: +- A pod's node has new topology, but its ingress/egress node has old topology. +- A pod's node has old topology, but its ingress/egress node has new topology. 
+
+The following diagram shows a possible topology to support those two scenarios:
+
+```mermaid
+%%{init: {"nodeSpacing": 20, "rankSpacing": 100}}%%
+flowchart TD
+ classDef nodeStyle fill:orange,stroke:none,rx:10px,ry:10px,font-size:25px;
+ classDef vmStyle fill:blue,stroke:none,color:white,rx:10px,ry:10px,font-size:25px;
+ classDef portStyle fill:#3CB371,color:black,stroke:none,rx:10px,ry:10px,font-size:25px;
+ classDef routerStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px;
+ classDef switchStyle fill:brown,color:white,stroke:none,rx:10px,ry:10px,font-size:25px;
+ classDef termStyle font-family:monospace,fill:black,stroke:none,color:white;
+ subgraph node2
+ subgraph GR-node2
+ rtos-GR-node2["rtos-GR-node2 100.65.0.3/16
203.203.0.1 (0a:58:64:41:00:03)"] + end + subgraph layer2-switch_node2["layer2-switch"] + stor-GR-node1["stor-GR-node1 + type: remote
requested-tnl-key: 4
100.65.0.2/16 (0a:58:64:41:00:02)"] + stor-GR-node2["stor-GR-node2 + type: router
requested-tnl-key: 5"] + end + end + subgraph node1["node1"] + subgraph GR-node1 + rtotr-GR-node1["rtotr-GR-node1 + 100.65.0.2/16 100.88.0.5/31 (0a:58:64:41:00:02)"] + end + subgraph transit_router_node1["transit_router "] + trtor-GR-node1["trtor-GR-node1 100.88.0.4/31"] + rtos-layer2-switch["trtos-layer2-switch 203.203.0.1 169.254.0.22/17 (0a:58:CB:CB:00:01)"] + trtos-layer2-switch-upgrade["trtos-layer2-switch-upgrade 100.65.255.254 (0a:58:64:41:00:02)"] + end + subgraph layer2-switch_node1["layer2-switch"] + stotr-layer2-switch["stotr-layer2-switch + type: router"] + stotr-layer2-switch-upgrade["stotr-layer2-switch-upgrade + type: router
requested-tnl-key: 4"] + stor-GR-node1-node2["stor-GR-node2 + type: remote
requested-tnl-key: 5
100.65.0.3/16 (0a:58:64:41:00:03)"]
+ end
+ end
+
+ rtotr-GR-node1 <--> trtor-GR-node1
+ rtos-layer2-switch <--> stotr-layer2-switch
+ stotr-layer2-switch-upgrade <--> stor-GR-node1
+ stor-GR-node2 <--> rtos-GR-node2
+ stor-GR-node1-node2 <--> stor-GR-node2
+ trtos-layer2-switch-upgrade <--> stotr-layer2-switch-upgrade
+
+ class VM vmStyle;
+ class rtotr-GR-node1 portStyle;
+ class rtos-GR-node2 portStyle;
+ class stor-GR-node2 portStyle;
+ class stor-GR-node1-node2 portStyle;
+ class stor-GR-node1 portStyle;
+ class trtor-GR-node1 portStyle;
+ class trtor-GR-node2 portStyle;
+ class stotr-layer2-switch portStyle;
+ class stotr-layer2-switch-upgrade portStyle;
+ class rtos-layer2-switch portStyle;
+ class trtos-layer2-switch-upgrade portStyle;
+ class GR-node1 routerStyle;
+ class GR-node2 routerStyle;
+ class transit_router_node1 routerStyle;
+ class layer2-switch_node1 switchStyle;
+ class layer2-switch_node2 switchStyle;
+ class term termStyle;
+ class node1,node2 nodeStyle;
+```
+
+The intermediate upgrade topology parts are bold, and will be removed once all nodes finish the upgrade.
+Nodes using the new topology must perform the following actions:
+- At the distributed switch:
+  - Retain `remote` type stor-GR LSPs from nodes still using the
+    old topology.
+  - Create a tmp `router` type stotr-upgrade LSP with `router-port` and the same tunnel key as before,
+    pointing to rtos-GR.
+  - Create a tmp transit router port `trtos-layer2-switch-upgrade` with the GR MAC address.
+  - Add a dummy IP from the join subnet to the `trtos-layer2-switch-upgrade` port to enable pod on node1 -> remote GR traffic.
+  - Add `transit_router` routes to steer joinIP traffic for the old nodes to the `trtos-layer2-switch-upgrade` port.
+    These routes look silly, but they work, like `100.65.0.3 100.65.0.3 dst-ip` for every joinIP of the old nodes. 
+ +We need to make sure joinSubnet works between upgraded and non-upgraded nodes, as this is the only network that both topologies +understand: +- We need to make sure pod2 on node2 -> `100.65.0.2` works (it is used e.g. for service reply traffic that came from node1 to pod2) + - pod2(`203.203.0.3`) -> default GW (`203.203.0.1`) + - GR will ARP for `100.65.0.2` and hit layer2-switch -> will use remote port `stor-GR-node1` config and ARP reply with `0a:58:64:41:00:02` + - `layer2-switch` sends packet to `0a:58:64:41:00:02` via `stor-GR-node1` with `tunnel-key=4` + - now we cross interconnect to node1 `stotr-layer2-switch-upgrade` + - it sends packet to `trtos-layer2-switch-upgrade` on the `transit_router` via direct peer connection + - MAC address `0a:58:64:41:00:02` is owned by the port and will be accepted by the transit_router + - dst IP=`100.65.0.2` => route lookup => choose between old joinIP routes via port `trtos-layer2-switch-upgrade` + and new transit route `100.65.0.2/32` via `100.88.0.6` + - `transit_router` will choose `100.65.0.2/32` with the longest-prefix match and send it to the `GR-node1` +- We need to make sure pod 1 on node1 -> `100.65.0.3` works (it is used e.g. 
for service reply traffic that came from node2 to pod1) + - pod1(`203.203.0.2`) -> default GW (`203.203.0.1`) + - `transit_router` will do route lookup and use `100.65.0.3/32` via `trtos-layer2-switch-upgrade` since it has the dummy IP + from the same subnet + - `transit_router` will ARP for `100.65.0.3` and go to the `layer2-switch`, which will use the remote port `stor-GR-node2` config + and ARP reply with `0a:58:64:41:00:03` + - `transit_router` sends packet to dst MAC `0a:58:64:41:00:03` via `trtos-layer2-switch-upgrade` + - `layer2-switch` sends it out via `stor-GR-node2` with `tunnel-key=5` + - `node2` handles the packet exactly as with the old topology + +For comparison, fully upgraded scenario for ingress service via node1 to local pod1 `203.203.0.2` looks like this: +- incoming packet gets DNAT'ed to the `serviceIP` and SNAT'ed to the node masq IP (src=`169.254.0.2`), then comes to the GR +- GR does its usual DNAT to the backend pod IP (dst=`203.203.0.2`) and SNAT to the joinIP (src=`100.65.0.2`) +- GR sends the packet via `rtotr-GR-node1` to the `transit_router` +- `transit_router` sends the packet (dst=`203.203.0.2`) directly to the `layer2-switch`, done +Now reply: +- pod1 (`203.203.0.2`) replies to `100.65.0.2` -> default GW (`203.203.0.1`) +- `transit_router` will ARP for `100.65.0.2` and get a reply from `GR-node1` via port `rtotr-GR-node1` with `0a:58:64:41:00:02` +- `transit_router` will do route lookup and choose `100.65.0.2/32` via `100.88.0.6` with the longest-prefix match + and send it to the `GR-node1` +- the usual un-DNAT (dst=`169.254.0.2`), un-SNAT (src=`serviceIP`) happens here before being sent out + +Some of the scenarios that we care about: +- Ingress traffic coming to GR via service + - upgraded GR -> non-upgraded node pod: we SNAT to the same joinIP as before, reply traffic comes back to the + `transit_router` because `trtos-layer2-switch-upgrade` port is configured with the GW MAC. 
+
+  - non-upgraded GR -> upgraded node pod: uses the same joinIP as before. Reply uses the newly assigned dummyIP
+    from the join subnet, to enable routing from the upgraded node pod to the joinIP subnet. Without the dummy IP
+    the transit_router doesn't know what to do with the `dst: 100.65.0.3` traffic.
+- Egress IP traffic: uses the same joinIP to route as before; works similarly to service traffic.
+
+The same GR MAC address is used on the GR port (`rtotr-GR-node1`) and the transit router port (`trtos-layer2-switch-upgrade`).
+That's fine because they're different L2 domains.
+
+The dummy IP can be reused on all nodes, since it is never used as a source or destination IP; it is only used for the routing decision.
+You may wonder why we need per-joinIP routes if the dummyIP should be enough for a routing decision based on the connected route.
+It is about the longest-prefix-match algorithm that OVN uses. Without `/32` routes for every joinIP, we have 2 fighting routes for
+pod1->joinIP traffic, one with the dummy IP `ip4.dst == 100.65.0.0/16` and another one for the pod subnet `ip4.src == <podSubnet>`
+(this one sends everything to the GR). So which route wins currently depends on the `podSubnet` netmask:
+if it is `/16` or larger, the dummy IP wins and everything works, otherwise the pod subnet route wins and the traffic is blackholed.
+
+The joinIP is not really needed in the new topology, but we will have to keep it during the upgrade, and we can't
+remove it after the upgrade is done, because there will be conntrack entries using it. Not too big of a deal,
+but a side effect of supporting the upgrade.
+There is a way to remove joinIPs eventually (not a part of this enhancement, but leaving it here for future reference),
+which requires 2 more upgrades:
+1. On the next upgrade replace all NATs using joinIPs with the transitIPs (at this point the new topology is used and all nodes understand
+transit IPs). This can only be done in the same manner as this upgrade (i.e. after the node reboots) to avoid disruption. 
+Routes on the pod will also need to be updated. +2. On the next-after-next upgrade we can safely remove joinIPs from the topology, since they shouldn't be used anymore. + +There is another aspect to this: joinSubnet is currently configurable in the Layer2 network spec, and it is exposed to the pods. +So if we get rid of it and replace it with the transitSubnet, we need to remove that field and see if transitSubnet also +needs to be configurable. + +#### Upgrade Details + +1. every node has to identify when it starts using new topology (so that the other nodes can switch from the switch +remote ports to the router remote ports). This can be done with annotations, but it either will have to be per-network +(which is a lot of updates) or it needs to check that absolutely no pods are present on the node for all layer2 networks, +and use node-scope annotation. The only downside of per-node annotation is that if any layer2 network for whatever (buggy) +reason still has a local pod, none of the networks will be upgraded. +Since the whole upgrade procedure relies on pod eviction before node reboot, it is ok to use per-node annotation. +The node will check that it has no running pods, and set the annotation "k8s.ovn.org/layer2-topology-version: 2.0". +2. every node will need to upgrade to the intermediate topology, when it has no running pods (when the node has been drained and rebooted) + - ovnkube is updated on each node, no upgrade action is taken for the layer 2 topology. Networks continue to function as before. + - Once network has rolled out, OpenShift Machine Config Operator (MCO) will start draining and rebooting nodes to update the host OS. + - As each upgraded node comes up, it will modify its layer 2 networks as shown in [the upgrade topology](#egressingress-traffic-over-remote-node), + the upgraded node will refuse to bring up any ovn-networked pods while the topology is updating. 
+ - Other ovnkube pods will see this ovnkube pod's node event, and then reconfigure their remote port to directly connect to the router. + - After all nodes have added their annotation, the nodes will then remove the backwards compatible upgrade topology + ("stotr-layer2-switch-upgrade" port, and the local join subnet address 100.65.255.254) + - During topology upgrade we also need to cleanup/remove old topology parts, like GR-switch ports, old GR routes and NATs, + and these cleanups need to be idempotent and spread across multiple controllers. +3. remove the upgrade topology artifacts (like extra ports or IPs) when all nodes finished upgrading. + +### Testing Details + +* Unit test checking topology will need to be adapted. +* E2e tests should be the same and pass since this is a refactoring, not adding or removing features. +* Upgrade tests are the most difficult part in this case. + * Perform an upgrade while using all the ovn-kubernetes layer2 topology features and check that these continue working. + * Make sure old layer2 topology is preserved and keeps working until all pods are removed from the node + * We don't have such upgrade tests yet, and they don't make sense after this upgrade is done, so the suggestion is to + do this testing manually. + * Make sure VM live migration works during the upgrade. + +### Documentation Details + +N/A + +## Risks, Known Limitations and Mitigations + +This topology repositions the NAT, which masquerades management port +traffic, from the gateway router to the `transit_router`. This change +might disrupt name resolution (DNS service access) and local gateway +access to external traffic. + +However, disrupting DNS resolution is not a big issue since clients would retry the +resolution. + +Consider that there are plans to migrate the SNAT from the logical +router to iptables on the nodes. If this occurs, the disruption +would be resolved. 
Therefore, it might be advisable to await this +enhancement's implementation before proceeding. + +## OVN Kubernetes Version Skew + +which version is this feature planned to be introduced in? +check repo milestones/releases to get this information for +when the next release is planned for + +## Alternatives + +N/A + +## References + +* OVN Transit Router: https://www.ovn.org/support/dist-docs/ovn-nb.5.html diff --git a/go-controller/Makefile b/go-controller/Makefile index 6487d04279..27ebf94c8b 100644 --- a/go-controller/Makefile +++ b/go-controller/Makefile @@ -12,14 +12,14 @@ GOPATH ?= $(shell go env GOPATH) TEST_REPORT_DIR?=$(CURDIR)/_artifacts export TEST_REPORT_DIR GO_VERSION ?= 1.24 -GO_DOCKER_IMG = quay.io/lib/golang:${GO_VERSION} +GO_DOCKER_IMG = quay.io/projectquay/golang:${GO_VERSION} # CONTAINER_RUNNABLE determines if the tests can be run inside a container. It checks to see if # podman/docker is installed on the system. PODMAN ?= $(shell podman -v > /dev/null 2>&1; echo $$?) ifeq ($(PODMAN), 0) -CONTAINER_RUNTIME=podman +CONTAINER_RUNTIME?=podman else -CONTAINER_RUNTIME=docker +CONTAINER_RUNTIME?=docker endif CONTAINER_RUNNABLE ?= $(shell $(CONTAINER_RUNTIME) -v > /dev/null 2>&1; echo $$?) 
OVN_SCHEMA_VERSION ?= v25.03.1 diff --git a/go-controller/cmd/ovnkube/ovnkube.go b/go-controller/cmd/ovnkube/ovnkube.go index 6ce5de42c9..7d0e40af81 100644 --- a/go-controller/cmd/ovnkube/ovnkube.go +++ b/go-controller/cmd/ovnkube/ovnkube.go @@ -8,6 +8,7 @@ import ( "os/signal" "strings" "sync" + "sync/atomic" "syscall" "text/tabwriter" "text/template" @@ -29,6 +30,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controllermanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" ovnnode "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" @@ -486,6 +488,14 @@ func runOvnKube(ctx context.Context, runMode *ovnkubeRunMode, ovnClientset *util clusterManager.Stop() }() } + // when ovnkube is running in ovnkube-controller and ovnkube node mode in the same process, bool is used to inform ovnkube-node that ovnkube-controller + // has sync'd once and changes have propagated to SB DB. ovnkube-node will then remove flows for dropping GARPs. + // Remove when OVN supports native silencing of GARPs on startup: https://issues.redhat.com/browse/FDP-1537 + // isOVNKubeControllerSyncd is true when ovnkube controller has sync and changes are in OVN Southbound database. 
+ var isOVNKubeControllerSyncd *atomic.Bool + if runMode.ovnkubeController && runMode.node && config.OVNKubernetesFeature.EnableEgressIP && config.OVNKubernetesFeature.EnableInterconnect && config.OvnKubeNode.Mode == types.NodeModeFull { + isOVNKubeControllerSyncd = &atomic.Bool{} + } if runMode.ovnkubeController { wg.Add(1) @@ -522,10 +532,20 @@ func runOvnKube(ctx context.Context, runMode *ovnkubeRunMode, ovnClientset *util controllerErr = fmt.Errorf("failed to start network controller: %w", err) return } - // record delay until ready metrics.MetricOVNKubeControllerReadyDuration.Set(time.Since(startTime).Seconds()) + if isOVNKubeControllerSyncd != nil { + klog.Infof("Waiting for OVN northbound database changes to sync to OVN Southbound database") + if err = libovsdbutil.WaitUntilNorthdSyncOnce(ctx, libovsdbOvnNBClient, libovsdbOvnSBClient); err != nil { + controllerErr = fmt.Errorf("failed waiting for northd to sync OVN Northbound DB to Southbound: %v", err) + return + } else { + klog.Infof("OVN northbound database changes synced to OVN Southbound database") + isOVNKubeControllerSyncd.Store(true) + } + } + <-ctx.Done() controllerManager.Stop() }() @@ -569,7 +589,7 @@ func runOvnKube(ctx context.Context, runMode *ovnkubeRunMode, ovnClientset *util return } - err = nodeControllerManager.Start(ctx) + err = nodeControllerManager.Start(ctx, isOVNKubeControllerSyncd) if err != nil { nodeErr = fmt.Errorf("failed to start node network controller: %w", err) return @@ -579,7 +599,7 @@ func runOvnKube(ctx context.Context, runMode *ovnkubeRunMode, ovnClientset *util metrics.MetricNodeReadyDuration.Set(time.Since(startTime).Seconds()) <-ctx.Done() - nodeControllerManager.Stop() + nodeControllerManager.Stop(isOVNKubeControllerSyncd) }() } diff --git a/go-controller/go.mod b/go-controller/go.mod index 9df6b1ccba..04b9a678ec 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -26,7 +26,7 @@ require ( github.com/k8snetworkplumbingwg/ipamclaims v0.5.1-alpha 
github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 - github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc + github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3 github.com/mdlayher/arp v0.0.0-20220512170110-6706a2966875 github.com/mdlayher/ndp v1.0.1 github.com/mdlayher/socket v0.2.1 @@ -42,10 +42,10 @@ require ( github.com/prometheus/client_golang v1.22.0 github.com/prometheus/client_model v0.6.1 github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 - github.com/spf13/afero v1.9.5 + github.com/spf13/afero v1.14.0 github.com/stretchr/testify v1.10.0 github.com/urfave/cli/v2 v2.27.2 - github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa + github.com/vishvananda/netlink v1.3.1 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/net v0.38.0 golang.org/x/sync v0.12.0 @@ -125,7 +125,7 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/vishvananda/netns v0.0.4 // indirect + github.com/vishvananda/netns v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect diff --git a/go-controller/go.sum b/go-controller/go.sum index 9dfe0d36d4..df20a066c9 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -4,39 +4,24 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod 
h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub 
v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= @@ -136,7 +121,6 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= @@ 
-275,7 +259,6 @@ github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -379,8 +362,6 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -410,7 +391,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp 
v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -429,18 +409,11 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= @@ -456,7 +429,6 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -544,8 +516,8 @@ github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzA github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 h1:z4P744DR+PIpkjwXSEc6TvN3L6LVzmUquFgmNm8wSUc= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= -github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc h1:v6+jUd70AayPbIRgTYUNpnBLG5cBPTY0+10y80CZeMk= -github.com/k8snetworkplumbingwg/sriovnet 
v1.2.1-0.20230427090635-4929697df2dc/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k= +github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3 h1:uSGOz0UYNPduUVXLdAthKdRjIaaCUxN8j9R30Kx0JxQ= +github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3/go.mod h1:UnAcraX3CxamBrn9H/xCLngKOquy5DyGWiupn05x9Ag= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -555,7 +527,6 @@ github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYW github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -712,7 +683,6 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -788,8 +758,8 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -844,13 +814,13 @@ github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y4 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa h1:iAhToRwOrdk+pKzclvLM7nKZhsg8f7dVrgkFccDUbUw= -github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= +github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= 
+github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -862,9 +832,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -881,8 +849,6 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= @@ -929,8 +895,6 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -957,7 +921,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint 
v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -967,8 +930,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -991,7 +952,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1004,19 +964,13 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -1024,7 +978,6 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= @@ -1034,10 +987,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1046,7 +995,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1098,20 +1046,13 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1121,16 +1062,12 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1206,28 +1143,11 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= @@ -1250,23 +1170,12 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1287,28 +1196,11 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= @@ -1324,15 +1216,9 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= @@ -1420,7 +1306,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= diff --git a/go-controller/pkg/allocator/pod/pod_annotation.go b/go-controller/pkg/allocator/pod/pod_annotation.go index d174a5e3d9..eed6bab488 100644 --- a/go-controller/pkg/allocator/pod/pod_annotation.go +++ b/go-controller/pkg/allocator/pod/pod_annotation.go @@ -272,7 +272,7 @@ func allocatePodAnnotationWithRollback( err error) { nadName := types.DefaultNetworkName - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { nadName = util.GetNADName(network.Namespace, network.Name) } podDesc := fmt.Sprintf("%s/%s/%s", nadName, pod.Namespace, pod.Name) @@ -308,6 +308,7 @@ func allocatePodAnnotationWithRollback( }() podAnnotation, _ = util.UnmarshalPodAnnotation(pod.Annotations, nadName) + isNetworkAllocated := podAnnotation != nil if podAnnotation == nil { podAnnotation = &util.PodAnnotation{} } @@ -391,7 +392,7 @@ func allocatePodAnnotationWithRollback( if hasIPAM { if len(tentative.IPs) > 0 { - if err = ipAllocator.AllocateIPs(tentative.IPs); err != nil && !ip.IsErrAllocated(err) { + if err = ipAllocator.AllocateIPs(tentative.IPs); err != nil && !shouldSkipAllocateIPsError(err, isNetworkAllocated, ipamClaim) { err = fmt.Errorf("failed to ensure requested or annotated IPs %v for %s: %w", util.StringSlice(tentative.IPs), podDesc, err) if !reallocateOnNonStaticIPRequest { @@ -402,7 +403,7 @@ func allocatePodAnnotationWithRollback( tentative.IPs = nil } - if err == nil && !hasIPAMClaim { // if we have persistentIPs, we should *not* release them on rollback + if err == nil && (!hasIPAMClaim || !isNetworkAllocated) { // copy the IPs that would need to be released releaseIPs = 
util.CopyIPNets(tentative.IPs) } @@ -510,7 +511,7 @@ func AddRoutesGatewayIP( // generate the nodeSubnets from the allocated IPs nodeSubnets := util.IPsToNetworkIPs(podAnnotation.IPs...) - if netinfo.IsSecondary() { + if netinfo.IsUserDefinedNetwork() { // for secondary network, see if its network-attachment's annotation has default-route key. // If present, then we need to add default route for it podAnnotation.Gateways = append(podAnnotation.Gateways, network.GatewayRequest...) @@ -643,3 +644,30 @@ func AddRoutesGatewayIP( return nil } + +// shouldSkipAllocateIPsError determines whether to skip/ignore IP allocation errors +// in scenarios where IPs may already be legitimately allocated. +// Returns false if the error is not ErrAllocated or if none of the skip conditions are met. True otherwise. +func shouldSkipAllocateIPsError(err error, networkAllocated bool, ipamClaim *ipamclaimsapi.IPAMClaim) bool { + // Only skip if it's an "already allocated" error + if !ip.IsErrAllocated(err) { + return false + } + + // If PreconfiguredUDNAddressesEnabled is disabled, always skip ErrAllocated + if !util.IsPreconfiguredUDNAddressesEnabled() { + return true + } + + // Always skip ErrAllocated if network annotation already persisted on pod + if networkAllocated { + return true + } + + // For persistent IP VM/Pods, if IPAMClaim already has IPs allocated, then ip already allocated, skip ErrAllocated + if ipamClaim != nil && len(ipamClaim.Status.IPs) > 0 { + return true + } + + return false +} diff --git a/go-controller/pkg/allocator/pod/pod_annotation_test.go b/go-controller/pkg/allocator/pod/pod_annotation_test.go index e4642ac027..f8044b62fa 100644 --- a/go-controller/pkg/allocator/pod/pod_annotation_test.go +++ b/go-controller/pkg/allocator/pod/pod_annotation_test.go @@ -718,6 +718,10 @@ func Test_allocatePodAnnotationWithRollback(t *testing.T) { name: "IPAM persistent IPs, IP address re-use", ipam: true, persistentIPAllocation: true, + podAnnotation: &util.PodAnnotation{ + 
IPs: ovntest.MustParseIPNets("192.168.0.200/24"), + MAC: util.IPAddrToHWAddr(ovntest.MustParseIPNets("192.168.0.200/24")[0].IP), + }, args: args{ network: &nadapi.NetworkSelectionElement{ IPAMClaimReference: "my-ipam-claim", @@ -734,7 +738,6 @@ func Test_allocatePodAnnotationWithRollback(t *testing.T) { nextIPs: ovntest.MustParseIPNets("192.168.0.3/24"), }, }, - wantUpdatedPod: true, wantPodAnnotation: &util.PodAnnotation{ IPs: ovntest.MustParseIPNets("192.168.0.200/24"), MAC: util.IPAddrToHWAddr(ovntest.MustParseIPNets("192.168.0.200/24")[0].IP), @@ -880,6 +883,138 @@ func Test_allocatePodAnnotationWithRollback(t *testing.T) { wantErr: true, wantReleaseID: true, }, + { + // Test ErrAllocated is always skipped with EnablePreconfiguredUDNAddresses disabled (legacy behavior) + name: "ErrAllocated should be skipped when EnablePreconfiguredUDNAddresses disabled", + ipam: true, + persistentIPAllocation: true, + enablePreconfiguredUDNAddresses: false, + args: args{ + network: &nadapi.NetworkSelectionElement{ + IPAMClaimReference: "my-ipam-claim", + }, + ipamClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-ipam-claim", + }, + Status: ipamclaimsapi.IPAMClaimStatus{ + IPs: []string{"192.168.0.200/24"}, + }, + }, + ipAllocator: &ipAllocatorStub{ + nextIPs: ovntest.MustParseIPNets("192.168.0.200/24"), + allocateIPsError: ipam.ErrAllocated, + }, + }, + wantUpdatedPod: true, + wantPodAnnotation: &util.PodAnnotation{ + IPs: ovntest.MustParseIPNets("192.168.0.200/24"), + MAC: util.IPAddrToHWAddr(ovntest.MustParseIPNets("192.168.0.200/24")[0].IP), + Gateways: []net.IP{ovntest.MustParseIP("192.168.0.1").To4()}, + Routes: []util.PodRoute{ + { + Dest: &net.IPNet{ + IP: ovntest.MustParseIP("100.65.0.0").To4(), + Mask: net.CIDRMask(16, 32), + }, + NextHop: ovntest.MustParseIP("192.168.0.1").To4(), + }, + }, + Role: types.NetworkRolePrimary, + }, + // With legacy behavior (feature flag disabled), IPs should NOT be tracked for rollback when hasIPAMClaim is 
true + role: types.NetworkRolePrimary, + }, + { + // Test ErrAllocated with EnablePreconfiguredUDNAddresses enabled and network annotation persisted - should not fail with ErrAllocated + name: "Pod with persisted annotation should skip ErrAllocated", + ipam: true, + persistentIPAllocation: true, + enablePreconfiguredUDNAddresses: true, + podAnnotation: &util.PodAnnotation{ + IPs: ovntest.MustParseIPNets("192.168.0.150/24"), + MAC: util.IPAddrToHWAddr(ovntest.MustParseIPNets("192.168.0.150/24")[0].IP), + }, + args: args{ + ipAllocator: &ipAllocatorStub{ + nextIPs: ovntest.MustParseIPNets("192.168.0.3/24"), + allocateIPsError: ipam.ErrAllocated, // Should be skipped because network already allocated + }, + }, + wantPodAnnotation: &util.PodAnnotation{ + IPs: ovntest.MustParseIPNets("192.168.0.150/24"), + MAC: util.IPAddrToHWAddr(ovntest.MustParseIPNets("192.168.0.150/24")[0].IP), + }, + // No wantUpdatedPod because annotation already exists and no changes needed + }, + { + // Test VM restart/migration case: new pod spawned with no network annotation but IPAMClaim has IPs + name: "VM restart/migration new pod with IPAMClaim IPs should skip ErrAllocated", + ipam: true, + persistentIPAllocation: true, + enablePreconfiguredUDNAddresses: true, + args: args{ + network: &nadapi.NetworkSelectionElement{ + IPAMClaimReference: "vm-ipam-claim", + }, + ipamClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vm-ipam-claim", + }, + Status: ipamclaimsapi.IPAMClaimStatus{ + IPs: []string{"192.168.0.250/24"}, // IPAMClaim has IPs from previous pod + }, + }, + ipAllocator: &ipAllocatorStub{ + nextIPs: ovntest.MustParseIPNets("192.168.0.3/24"), + allocateIPsError: ipam.ErrAllocated, // Should be skipped because IPAMClaim has IPs + }, + }, + wantUpdatedPod: true, + wantPodAnnotation: &util.PodAnnotation{ + IPs: ovntest.MustParseIPNets("192.168.0.250/24"), + MAC: util.IPAddrToHWAddr(ovntest.MustParseIPNets("192.168.0.250/24")[0].IP), + Gateways: 
[]net.IP{ovntest.MustParseIP("192.168.0.1").To4()}, + Routes: []util.PodRoute{ + { + Dest: &net.IPNet{ + IP: ovntest.MustParseIP("100.65.0.0").To4(), + Mask: net.CIDRMask(16, 32), + }, + NextHop: ovntest.MustParseIP("192.168.0.1").To4(), + }, + }, + Role: types.NetworkRolePrimary, + }, + role: types.NetworkRolePrimary, + }, + { + // Test ErrAllocated when pod with no annotation and IPAMClaim has no IPs allocated yet - should fail on ErrAllocated + name: "New pod with IPAMClaim but no IPs yet should fail on ErrAllocated", + ipam: true, + persistentIPAllocation: true, + enablePreconfiguredUDNAddresses: true, + args: args{ + network: &nadapi.NetworkSelectionElement{ + IPAMClaimReference: "empty-ipam-claim", + IPRequest: []string{"192.168.0.100/24"}, // Request specific IP to trigger AllocateIPs call + }, + reallocate: false, // Don't reallocate on error + ipamClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "empty-ipam-claim", + }, + Status: ipamclaimsapi.IPAMClaimStatus{ + IPs: []string{}, // No IPs allocated yet + }, + }, + ipAllocator: &ipAllocatorStub{ + nextIPs: ovntest.MustParseIPNets("192.168.0.3/24"), + allocateIPsError: ipam.ErrAllocated, // Should NOT be skipped, should cause failure + }, + }, + wantErr: true, // Should fail because ErrAllocated is not skipped + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/go-controller/pkg/clustermanager/clustermanager.go b/go-controller/pkg/clustermanager/clustermanager.go index 281fb000ec..b382cb5212 100644 --- a/go-controller/pkg/clustermanager/clustermanager.go +++ b/go-controller/pkg/clustermanager/clustermanager.go @@ -27,13 +27,13 @@ import ( // ClusterManager structure is the object which manages the cluster nodes. // It creates a default network controller for the default network and a -// secondary network cluster controller manager to manage the multi networks. +// user-defined network cluster controller manager to manage the multi networks. 
type ClusterManager struct { client clientset.Interface defaultNetClusterController *networkClusterController zoneClusterController *zoneClusterController wf *factory.WatchFactory - secondaryNetClusterManager *secondaryNetworkClusterManager + udnClusterManager *userDefinedNetworkClusterManager // Controller used for programming node allocation for egress IP // The OVN DB setup is handled by egressIPZoneController that runs in ovnkube-controller eIPC *egressIPClusterController @@ -90,7 +90,7 @@ func NewClusterManager( return nil, err } - cm.secondaryNetClusterManager, err = newSecondaryNetworkClusterManager(ovnClient, wf, cm.networkManager.Interface(), recorder) + cm.udnClusterManager, err = newUserDefinedNetworkClusterManager(ovnClient, wf, cm.networkManager.Interface(), recorder) if err != nil { return nil, err } @@ -152,8 +152,8 @@ func NewClusterManager( cm.recorder, ) cm.userDefinedNetworkController = udnController - if cm.secondaryNetClusterManager != nil { - cm.secondaryNetClusterManager.SetNetworkStatusReporter(udnController.UpdateSubsystemCondition) + if cm.udnClusterManager != nil { + cm.udnClusterManager.SetNetworkStatusReporter(udnController.UpdateSubsystemCondition) } } @@ -258,7 +258,7 @@ func (cm *ClusterManager) Stop() { } func (cm *ClusterManager) NewNetworkController(netInfo util.NetInfo) (networkmanager.NetworkController, error) { - return cm.secondaryNetClusterManager.NewNetworkController(netInfo) + return cm.udnClusterManager.NewNetworkController(netInfo) } func (cm *ClusterManager) GetDefaultNetworkController() networkmanager.ReconcilableNetworkController { @@ -266,7 +266,7 @@ func (cm *ClusterManager) GetDefaultNetworkController() networkmanager.Reconcila } func (cm *ClusterManager) CleanupStaleNetworks(validNetworks ...util.NetInfo) error { - return cm.secondaryNetClusterManager.CleanupStaleNetworks(validNetworks...) + return cm.udnClusterManager.CleanupStaleNetworks(validNetworks...) 
} func (cm *ClusterManager) Reconcile(name string, old, new util.NetInfo) error { diff --git a/go-controller/pkg/clustermanager/network_cluster_controller.go b/go-controller/pkg/clustermanager/network_cluster_controller.go index f8241aadca..219fb6d9d9 100644 --- a/go-controller/pkg/clustermanager/network_cluster_controller.go +++ b/go-controller/pkg/clustermanager/network_cluster_controller.go @@ -154,7 +154,7 @@ func (ncc *networkClusterController) hasNodeAllocation() bool { return config.OVNKubernetesFeature.EnableInterconnect default: // we need to allocate network IDs and subnets - return !ncc.IsSecondary() + return !ncc.IsUserDefinedNetwork() } } @@ -452,10 +452,10 @@ func (ncc *networkClusterController) newRetryFramework(objectType reflect.Type, return objretry.NewRetryFramework(ncc.stopChan, ncc.wg, ncc.watchFactory, resourceHandler) } -// Cleanup the subnet annotations from the node for the secondary networks +// Cleanup the subnet annotations from the node for the User Defined Networks func (ncc *networkClusterController) Cleanup() error { - if !ncc.IsSecondary() { - return fmt.Errorf("default network can't be cleaned up") + if !ncc.IsUserDefinedNetwork() { + return fmt.Errorf("default network cannot be cleaned up") } if ncc.hasNodeAllocation() { diff --git a/go-controller/pkg/clustermanager/node/node_allocator.go b/go-controller/pkg/clustermanager/node/node_allocator.go index ab4f950035..e31625b725 100644 --- a/go-controller/pkg/clustermanager/node/node_allocator.go +++ b/go-controller/pkg/clustermanager/node/node_allocator.go @@ -124,11 +124,11 @@ func (na *NodeAllocator) CleanupStaleAnnotation() { func (na *NodeAllocator) hasHybridOverlayAllocation() bool { // When config.HybridOverlay.ClusterSubnets is empty, assume the subnet allocation will be managed by an external component. 
- return config.HybridOverlay.Enabled && !na.netInfo.IsSecondary() && len(config.HybridOverlay.ClusterSubnets) > 0 + return config.HybridOverlay.Enabled && !na.netInfo.IsUserDefinedNetwork() && len(config.HybridOverlay.ClusterSubnets) > 0 } func (na *NodeAllocator) hasHybridOverlayAllocationUnmanaged() bool { - return config.HybridOverlay.Enabled && !na.netInfo.IsSecondary() && len(config.HybridOverlay.ClusterSubnets) == 0 + return config.HybridOverlay.Enabled && !na.netInfo.IsUserDefinedNetwork() && len(config.HybridOverlay.ClusterSubnets) == 0 } func (na *NodeAllocator) recordSubnetCount() { @@ -595,7 +595,7 @@ func (na *NodeAllocator) allocateNodeSubnets(allocator SubnetAllocator, nodeName func (na *NodeAllocator) hasNodeSubnetAllocation() bool { // we only allocate subnets for L3 secondary network or default network - return na.netInfo.TopologyType() == types.Layer3Topology || !na.netInfo.IsSecondary() + return na.netInfo.TopologyType() == types.Layer3Topology || !na.netInfo.IsUserDefinedNetwork() } func (na *NodeAllocator) markAllocatedNetworksForUnmanagedHONode(node *corev1.Node) error { diff --git a/go-controller/pkg/clustermanager/pod/allocator.go b/go-controller/pkg/clustermanager/pod/allocator.go index 5bd9cafcc1..bd9c28c956 100644 --- a/go-controller/pkg/clustermanager/pod/allocator.go +++ b/go-controller/pkg/clustermanager/pod/allocator.go @@ -364,7 +364,8 @@ func (a *PodAllocator) allocatePodOnNAD(pod *corev1.Pod, nad string, network *ne ) if err != nil { - if errors.Is(err, ipallocator.ErrFull) { + if errors.Is(err, ipallocator.ErrFull) || + errors.Is(err, ipallocator.ErrAllocated) { a.recordPodErrorEvent(pod, err) } return err diff --git a/go-controller/pkg/clustermanager/secondary_network_cluster_manager.go b/go-controller/pkg/clustermanager/user_defined_network_cluster_manager.go similarity index 81% rename from go-controller/pkg/clustermanager/secondary_network_cluster_manager.go rename to 
go-controller/pkg/clustermanager/user_defined_network_cluster_manager.go index f77aafd7bc..bae096a6d4 100644 --- a/go-controller/pkg/clustermanager/secondary_network_cluster_manager.go +++ b/go-controller/pkg/clustermanager/user_defined_network_cluster_manager.go @@ -14,10 +14,10 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -// secondaryNetworkClusterManager object manages the multi net-attach-def controllers. +// userDefinedNetworkClusterManager object manages the multi net-attach-def controllers. // It implements networkmanager.ControllerManager interface and can be used // by network manager to create and delete network controllers. -type secondaryNetworkClusterManager struct { +type userDefinedNetworkClusterManager struct { // networkManager creates and deletes network controllers networkManager networkmanager.Interface ovnClient *util.OVNClusterManagerClientset @@ -29,14 +29,14 @@ type secondaryNetworkClusterManager struct { errorReporter NetworkStatusReporter } -func newSecondaryNetworkClusterManager( +func newUserDefinedNetworkClusterManager( ovnClient *util.OVNClusterManagerClientset, wf *factory.WatchFactory, networkManager networkmanager.Interface, recorder record.EventRecorder, -) (*secondaryNetworkClusterManager, error) { - klog.Infof("Creating secondary network cluster manager") - sncm := &secondaryNetworkClusterManager{ +) (*userDefinedNetworkClusterManager, error) { + klog.Infof("Creating user-defined network cluster manager") + sncm := &userDefinedNetworkClusterManager{ ovnClient: ovnClient, watchFactory: wf, networkManager: networkManager, @@ -45,17 +45,17 @@ func newSecondaryNetworkClusterManager( return sncm, nil } -func (sncm *secondaryNetworkClusterManager) SetNetworkStatusReporter(errorReporter NetworkStatusReporter) { +func (sncm *userDefinedNetworkClusterManager) SetNetworkStatusReporter(errorReporter NetworkStatusReporter) { sncm.errorReporter = errorReporter } -func (sncm *secondaryNetworkClusterManager) 
GetDefaultNetworkController() networkmanager.ReconcilableNetworkController { +func (sncm *userDefinedNetworkClusterManager) GetDefaultNetworkController() networkmanager.ReconcilableNetworkController { return nil } // NewNetworkController implements the networkmanager.ControllerManager // interface called by network manager to create or delete a network controller. -func (sncm *secondaryNetworkClusterManager) NewNetworkController(nInfo util.NetInfo) (networkmanager.NetworkController, error) { +func (sncm *userDefinedNetworkClusterManager) NewNetworkController(nInfo util.NetInfo) (networkmanager.NetworkController, error) { if !sncm.isTopologyManaged(nInfo) { return nil, networkmanager.ErrNetworkControllerTopologyNotManaged } @@ -73,7 +73,7 @@ func (sncm *secondaryNetworkClusterManager) NewNetworkController(nInfo util.NetI return sncc, nil } -func (sncm *secondaryNetworkClusterManager) isTopologyManaged(nInfo util.NetInfo) bool { +func (sncm *userDefinedNetworkClusterManager) isTopologyManaged(nInfo util.NetInfo) bool { switch nInfo.TopologyType() { case ovntypes.Layer3Topology: // we need to allocate subnets to each node regardless of configuration @@ -93,7 +93,7 @@ func (sncm *secondaryNetworkClusterManager) isTopologyManaged(nInfo util.NetInfo // CleanupStaleNetworks cleans of stale data from the OVN database // corresponding to networks not included in validNetworks, which are considered // stale. -func (sncm *secondaryNetworkClusterManager) CleanupStaleNetworks(validNetworks ...util.NetInfo) error { +func (sncm *userDefinedNetworkClusterManager) CleanupStaleNetworks(validNetworks ...util.NetInfo) error { existingNetworksMap := map[string]struct{}{} for _, network := range validNetworks { existingNetworksMap[network.GetNetworkName()] = struct{}{} @@ -147,7 +147,7 @@ func (sncm *secondaryNetworkClusterManager) CleanupStaleNetworks(validNetworks . 
} // newDummyNetworkController creates a dummy network controller used to clean up specific network -func (sncm *secondaryNetworkClusterManager) newDummyLayer3NetworkController(netName string) (networkmanager.NetworkController, error) { +func (sncm *userDefinedNetworkClusterManager) newDummyLayer3NetworkController(netName string) (networkmanager.NetworkController, error) { netInfo, _ := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: types.NetConf{Name: netName}, Topology: ovntypes.Layer3Topology}) nc := newNetworkClusterController( netInfo, diff --git a/go-controller/pkg/clustermanager/secondary_network_unit_test.go b/go-controller/pkg/clustermanager/user_defined_network_unit_test.go similarity index 95% rename from go-controller/pkg/clustermanager/secondary_network_unit_test.go rename to go-controller/pkg/clustermanager/user_defined_network_unit_test.go index 14c2cbaa8d..a3a9af59e8 100644 --- a/go-controller/pkg/clustermanager/secondary_network_unit_test.go +++ b/go-controller/pkg/clustermanager/user_defined_network_unit_test.go @@ -57,8 +57,8 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { wg.Wait() }) - ginkgo.Context("Secondary networks", func() { - ginkgo.It("Attach secondary layer3 network", func() { + ginkgo.Context("User-Defined Networks", func() { + ginkgo.It("Attach layer3 UDN", func() { app.Action = func(ctx *cli.Context) error { kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{Items: nodes()}) fakeClient := &util.OVNClusterManagerClientset{ @@ -74,7 +74,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { err = f.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) netInfo, err := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: 
types.NetConf{Name: "blue"}, Topology: ovntypes.Layer3Topology, Subnets: "192.168.0.0/16/24"}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -130,7 +130,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - ginkgo.It("The secondary network controller starts successfully", func() { + ginkgo.It("The UDN controller starts successfully", func() { app.Action = func(ctx *cli.Context) error { gomega.Expect( initConfig(ctx, config.OVNKubernetesFeatureConfig{ @@ -143,7 +143,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { err = f.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) config.OVNKubernetesFeature.EnableInterconnect = false @@ -181,7 +181,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).NotTo(gomega.HaveOccurred()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( @@ -226,7 +226,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { }) ginkgo.DescribeTable( - "the secondary network controller", + "the UDN controller", func(netConf *ovncnitypes.NetConf, featureConfig config.OVNKubernetesFeatureConfig, expectedError error) { var err error netInfo, err = util.NewNetInfo(netConf) @@ -240,7 +240,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { err = f.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - sncm, err := 
newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) _, err = sncm.NewNetworkController(netInfo) @@ -378,7 +378,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Eventually(checkNodeAnnotations).ShouldNot(gomega.HaveOccurred()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create a fake nad controller for blue network so that the red network gets cleared @@ -476,7 +476,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).To(gomega.Succeed()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( @@ -529,7 +529,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).To(gomega.Succeed()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( @@ -578,7 +578,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).To(gomega.Succeed()) - sncm, err := 
newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( @@ -647,7 +647,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).To(gomega.Succeed()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( @@ -719,7 +719,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).To(gomega.Succeed()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( @@ -785,7 +785,7 @@ var _ = ginkgo.Describe("Cluster Controller Manager", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(f.Start()).NotTo(gomega.HaveOccurred()) - sncm, err := newSecondaryNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) + sncm, err := newUserDefinedNetworkClusterManager(fakeClient, f, networkmanager.Default().Interface(), recorder) gomega.Expect(err).NotTo(gomega.HaveOccurred()) nc := newNetworkClusterController( diff --git a/go-controller/pkg/cni/helper_linux.go b/go-controller/pkg/cni/helper_linux.go index fdad828cab..dca3bfaa09 100644 --- a/go-controller/pkg/cni/helper_linux.go +++ b/go-controller/pkg/cni/helper_linux.go @@ -434,7 +434,7 @@ func 
ConfigureOVS(ctx context.Context, namespace, podName, hostIfaceName string, ifaceID := util.GetIfaceId(namespace, podName) if ifInfo.NetName != types.DefaultNetworkName { - ifaceID = util.GetSecondaryNetworkIfaceId(namespace, podName, ifInfo.NADName) + ifaceID = util.GetUDNIfaceId(namespace, podName, ifInfo.NADName) } initialPodUID := ifInfo.PodUID ipStrs := make([]string, len(ifInfo.IPs)) diff --git a/go-controller/pkg/cni/types.go b/go-controller/pkg/cni/types.go index 332898ac03..f405954721 100644 --- a/go-controller/pkg/cni/types.go +++ b/go-controller/pkg/cni/types.go @@ -164,7 +164,7 @@ type PodRequest struct { // network name, for default network, this will be types.DefaultNetworkName netName string - // for ovs interfaces plumbed for secondary networks, their iface-id's prefix is derived from the specific nadName; + // for ovs interfaces plumbed for UDNs, their iface-id's prefix is derived from the specific nadName; // also, need to find the pod annotation, dpu pod connection/status annotations of the given NAD ("default" // for default network). nadName string diff --git a/go-controller/pkg/cni/utils.go b/go-controller/pkg/cni/utils.go index 2f063c6aea..542f1813d3 100644 --- a/go-controller/pkg/cni/utils.go +++ b/go-controller/pkg/cni/utils.go @@ -139,7 +139,7 @@ func PodAnnotation2PodInfo(podAnnotation map[string]string, podNADAnnotation *ut podInterfaceInfo := &PodInterfaceInfo{ PodAnnotation: *podNADAnnotation, MTU: mtu, - RoutableMTU: config.Default.RoutableMTU, // TBD, configurable for secondary network? + RoutableMTU: config.Default.RoutableMTU, // TBD, configurable for UDNs? 
Ingress: ingress, Egress: egress, IsDPUHostMode: config.OvnKubeNode.Mode == types.NodeModeDPUHost, diff --git a/go-controller/pkg/controllermanager/controller_manager.go b/go-controller/pkg/controllermanager/controller_manager.go index 55aaec6831..6597e381ca 100644 --- a/go-controller/pkg/controllermanager/controller_manager.go +++ b/go-controller/pkg/controllermanager/controller_manager.go @@ -70,7 +70,7 @@ type ControllerManager struct { func (cm *ControllerManager) NewNetworkController(nInfo util.NetInfo) (networkmanager.NetworkController, error) { // Pass a shallow clone of the watch factory, this allows multiplexing - // informers for secondary networks. + // informers for user-defined networks. cnci, err := cm.newCommonNetworkControllerInfo(cm.watchFactory.ShallowClone()) if err != nil { return nil, fmt.Errorf("failed to create network controller info %w", err) @@ -78,11 +78,11 @@ func (cm *ControllerManager) NewNetworkController(nInfo util.NetInfo) (networkma topoType := nInfo.TopologyType() switch topoType { case ovntypes.Layer3Topology: - return ovn.NewSecondaryLayer3NetworkController(cnci, nInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.eIPController, cm.portCache) + return ovn.NewLayer3UserDefinedNetworkController(cnci, nInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.eIPController, cm.portCache) case ovntypes.Layer2Topology: - return ovn.NewSecondaryLayer2NetworkController(cnci, nInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.portCache, cm.eIPController) + return ovn.NewLayer2UserDefinedNetworkController(cnci, nInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.portCache, cm.eIPController) case ovntypes.LocalnetTopology: - return ovn.NewSecondaryLocalnetNetworkController(cnci, nInfo, cm.networkManager.Interface()), nil + return ovn.NewLocalnetUserDefinedNetworkController(cnci, nInfo, cm.networkManager.Interface()), nil } return nil, fmt.Errorf("topology type %s not supported", topoType) } @@ 
-90,7 +90,7 @@ func (cm *ControllerManager) NewNetworkController(nInfo util.NetInfo) (networkma // newDummyNetworkController creates a dummy network controller used to clean up specific network func (cm *ControllerManager) newDummyNetworkController(topoType, netName string) (networkmanager.NetworkController, error) { // Pass a shallow clone of the watch factory, this allows multiplexing - // informers for secondary networks. + // informers for user-defined Networks. cnci, err := cm.newCommonNetworkControllerInfo(cm.watchFactory.ShallowClone()) if err != nil { return nil, fmt.Errorf("failed to create network controller info %w", err) @@ -98,11 +98,11 @@ func (cm *ControllerManager) newDummyNetworkController(topoType, netName string) netInfo, _ := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: types.NetConf{Name: netName}, Topology: topoType}) switch topoType { case ovntypes.Layer3Topology: - return ovn.NewSecondaryLayer3NetworkController(cnci, netInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.eIPController, cm.portCache) + return ovn.NewLayer3UserDefinedNetworkController(cnci, netInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.eIPController, cm.portCache) case ovntypes.Layer2Topology: - return ovn.NewSecondaryLayer2NetworkController(cnci, netInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.portCache, cm.eIPController) + return ovn.NewLayer2UserDefinedNetworkController(cnci, netInfo, cm.networkManager.Interface(), cm.routeImportManager, cm.portCache, cm.eIPController) case ovntypes.LocalnetTopology: - return ovn.NewSecondaryLocalnetNetworkController(cnci, netInfo, cm.networkManager.Interface()), nil + return ovn.NewLocalnetUserDefinedNetworkController(cnci, netInfo, cm.networkManager.Interface()), nil } return nil, fmt.Errorf("topology type %s not supported", topoType) } diff --git a/go-controller/pkg/controllermanager/node_controller_manager.go b/go-controller/pkg/controllermanager/node_controller_manager.go index 
94fbf18fd2..e183e3b3fc 100644 --- a/go-controller/pkg/controllermanager/node_controller_manager.go +++ b/go-controller/pkg/controllermanager/node_controller_manager.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" "sync" + "sync/atomic" "time" "k8s.io/apimachinery/pkg/util/sets" @@ -40,7 +41,7 @@ type NodeControllerManager struct { defaultNodeNetworkController *node.DefaultNodeNetworkController - // networkManager creates and deletes secondary network controllers + // networkManager creates and deletes user-defined network controllers networkManager networkmanager.Controller // vrf manager that creates and manages vrfs for all UDNs vrfManager *vrfmanager.Controller @@ -52,14 +53,14 @@ type NodeControllerManager struct { ovsClient client.Client } -// NewNetworkController create secondary node network controllers for the given NetInfo +// NewNetworkController create node user-defined network controllers for the given NetInfo func (ncm *NodeControllerManager) NewNetworkController(nInfo util.NetInfo) (networkmanager.NetworkController, error) { topoType := nInfo.TopologyType() switch topoType { case ovntypes.Layer3Topology, ovntypes.Layer2Topology, ovntypes.LocalnetTopology: // Pass a shallow clone of the watch factory, this allows multiplexing - // informers for secondary networks. - return node.NewSecondaryNodeNetworkController(ncm.newCommonNetworkControllerInfo(ncm.watchFactory.(*factory.WatchFactory).ShallowClone()), + // informers for UDNs. 
+ return node.NewUserDefinedNodeNetworkController(ncm.newCommonNetworkControllerInfo(ncm.watchFactory.(*factory.WatchFactory).ShallowClone()), nInfo, ncm.networkManager.Interface(), ncm.vrfManager, ncm.ruleManager, ncm.defaultNodeNetworkController.Gateway) } return nil, fmt.Errorf("topology type %s not supported", topoType) @@ -69,7 +70,7 @@ func (ncm *NodeControllerManager) GetDefaultNetworkController() networkmanager.R return ncm.defaultNodeNetworkController } -// CleanupStaleNetworks cleans up all stale entities giving list of all existing secondary network controllers +// CleanupStaleNetworks cleans up all stale entities giving list of all existing node UDN controllers func (ncm *NodeControllerManager) CleanupStaleNetworks(validNetworks ...util.NetInfo) error { if !util.IsNetworkSegmentationSupportEnabled() { return nil @@ -91,8 +92,8 @@ func (ncm *NodeControllerManager) newCommonNetworkControllerInfo(wf factory.Node // isNetworkManagerRequiredForNode checks if network manager should be started // on the node side, which requires any of the following conditions: -// (1) dpu mode is enabled when secondary networks feature is enabled -// (2) primary user defined networks is enabled (all modes) +// (1) dpu mode is enabled when multiple networks feature is enabled +// (2) primary user-defined networks is enabled (all modes) func isNetworkManagerRequiredForNode() bool { return (config.OVNKubernetesFeature.EnableMultiNetwork && config.OvnKubeNode.Mode == ovntypes.NodeModeDPU) || util.IsNetworkSegmentationSupportEnabled() || @@ -114,7 +115,7 @@ func NewNodeControllerManager(ovnClient *util.OVNClientset, wf factory.NodeWatch ovsClient: ovsClient, } - // need to configure OVS interfaces for Pods on secondary networks in the DPU mode + // need to configure OVS interfaces for Pods on UDNs in the DPU mode // need to start NAD controller on node side for programming gateway pieces for UDNs // need to start NAD controller on node side for VRF awareness with BGP var err error 
@@ -147,7 +148,7 @@ func (ncm *NodeControllerManager) initDefaultNodeNetworkController(ctx context.C } // Start the node network controller manager -func (ncm *NodeControllerManager) Start(ctx context.Context) (err error) { +func (ncm *NodeControllerManager) Start(ctx context.Context, isOVNKubeControllerSyncd *atomic.Bool) (err error) { klog.Infof("Starting the node network controller manager, Mode: %s", config.OvnKubeNode.Mode) // Initialize OVS exec runner; find OVS binaries that the CNI code uses. @@ -166,7 +167,7 @@ func (ncm *NodeControllerManager) Start(ctx context.Context) (err error) { defer func() { if err != nil { klog.Errorf("Stopping node network controller manager, err=%v", err) - ncm.Stop() + ncm.Stop(isOVNKubeControllerSyncd) } }() @@ -224,15 +225,46 @@ func (ncm *NodeControllerManager) Start(ctx context.Context) (err error) { return fmt.Errorf("failed to own priority %d for IP rules: %v", node.UDNMasqueradeIPRulePriority, err) } } + + // start workaround and remove when ovn has native support for silencing GARPs for LRPs + // https://issues.redhat.com/browse/FDP-1537 + // when in mode ovnkube controller with node, wait until ovnkube controller is syncd before removing drop flows for GARPs +waitForControllerSyncLoop: + for { + select { + case <-ctx.Done(): + return nil + default: + if isOVNKubeControllerSyncd != nil && !isOVNKubeControllerSyncd.Load() { + klog.V(5).Infof("Waiting for ovnkube controller to start before removing GARP drop flows") + time.Sleep(200 * time.Millisecond) + continue + } + klog.Infof("Removing flows to drop GARP") + ncm.defaultNodeNetworkController.Gateway.SetDefaultBridgeGARPDropFlows(false) + if err := ncm.defaultNodeNetworkController.Gateway.Reconcile(); err != nil { + return fmt.Errorf("failed to reconcile gateway after removing GARP drop flows for ext bridge: %v", err) + } + break waitForControllerSyncLoop + } + } + // end workaround + return nil } // Stop gracefully stops all managed controllers -func (ncm 
*NodeControllerManager) Stop() { +func (ncm *NodeControllerManager) Stop(isOVNKubeControllerSyncd *atomic.Bool) { // stop stale ovs ports cleanup close(ncm.stopChan) if ncm.defaultNodeNetworkController != nil { + if isOVNKubeControllerSyncd != nil && ncm.defaultNodeNetworkController.Gateway != nil { + ncm.defaultNodeNetworkController.Gateway.SetDefaultBridgeGARPDropFlows(true) + if err := ncm.defaultNodeNetworkController.Gateway.Reconcile(); err != nil { + klog.Errorf("Failed to reconcile gateway after attempting to add flows to the external bridge to drop GARPs: %v", err) + } + } ncm.defaultNodeNetworkController.Stop() } diff --git a/go-controller/pkg/libovsdb/util/northd_sync.go b/go-controller/pkg/libovsdb/util/northd_sync.go new file mode 100644 index 0000000000..40aa212680 --- /dev/null +++ b/go-controller/pkg/libovsdb/util/northd_sync.go @@ -0,0 +1,65 @@ +package util + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" +) + +// WaitUntilNorthdSyncOnce ensures northd has sync'd at least once by incrementing the nb_cfg value in NB DB and waiting +// for northd to copy it to SB DB. Polls SB DB until the context is cancelled. +// The expectation is that the data you wish to be sync'd to SB DB has already been written to NB DB so when we get the initial +// nb_cfg value, we know that if we increment that by one and see that value or greater in SB DB, then the data has sync'd. +// All other processes interacting with nb_cfg increment it. This function depends on other processes respecting that. +// No guarantee of any changes in SB DB made after this func. 
+func WaitUntilNorthdSyncOnce(ctx context.Context, nbClient, sbClient client.Client) error { + // 1. Get value of nb_cfg + // 2. Increment value of nb_cfg + // 3. Wait until value appears in SB DB after northd copies it. + nbGlobal := &nbdb.NBGlobal{} + nbGlobal, err := libovsdbops.GetNBGlobal(nbClient, nbGlobal) + if err != nil { + return fmt.Errorf("failed to find OVN Northbound NB_Global table"+ + " entry: %w", err) + } + // increment nb_cfg value by 1. When northd consumes updates from NB DB, it will copy this value to SB DBs SB_Global table nb_cfg field. + ops, err := nbClient.Where(nbGlobal).Mutate(nbGlobal, model.Mutation{ + Field: &nbGlobal.NbCfg, + Mutator: ovsdb.MutateOperationAdd, + Value: 1, + }) + if err != nil { + return fmt.Errorf("failed to generate ops to mutate nb_cfg: %w", err) + } + expectedNbCfgValue := nbGlobal.NbCfg + 1 + if _, err = libovsdbops.TransactAndCheck(nbClient, ops); err != nil { + return fmt.Errorf("failed to transact to increment nb_cfg: %w", err) + } + sbGlobal := &sbdb.SBGlobal{} + // poll until we see the expected value in SB DB every 5 milliseconds until context is cancelled. 
+ err = wait.PollUntilContextCancel(ctx, time.Millisecond*5, true, func(_ context.Context) (done bool, err error) { + if sbGlobal, err = libovsdbops.GetSBGlobal(sbClient, sbGlobal); err != nil { + // northd hasn't added an entry yet + if errors.Is(err, client.ErrNotFound) { + return false, nil + } + return false, fmt.Errorf("failed to get sb_global table entry from SB DB: %w", err) + } + return sbGlobal.NbCfg >= expectedNbCfgValue, nil // we only need to ensure it is greater than or equal to the expected value + }) + if err != nil { + return fmt.Errorf("failed while waiting for nb_cfg value greater than or equal %d in sb db sb_global table: %w", expectedNbCfgValue, err) + } + return nil +} diff --git a/go-controller/pkg/metrics/ovnkube_controller.go b/go-controller/pkg/metrics/ovnkube_controller.go index 30c846d07c..dd0d559450 100644 --- a/go-controller/pkg/metrics/ovnkube_controller.go +++ b/go-controller/pkg/metrics/ovnkube_controller.go @@ -512,8 +512,8 @@ func RunTimestamp(stopChan <-chan struct{}, sbClient, nbClient libovsdbclient.Cl // RecordPodCreated extracts the scheduled timestamp and records how long it took // us to notice this and set up the pod's scheduling. 
func RecordPodCreated(pod *corev1.Pod, netInfo util.NetInfo) { - if netInfo.IsSecondary() { - // TBD: no op for secondary network for now, TBD + if netInfo.IsUserDefinedNetwork() { + // TBD: noop for UDN for now return } t := time.Now() @@ -761,8 +761,8 @@ func (pr *PodRecorder) CleanPod(podUID kapimtypes.UID) { } func (pr *PodRecorder) AddLSP(podUID kapimtypes.UID, netInfo util.NetInfo) { - if netInfo.IsSecondary() { - // TBD: no op for secondary network for now, TBD + if netInfo.IsUserDefinedNetwork() { + // TBD: noop for UDN for now return } if pr.queue != nil && !pr.queueFull() { diff --git a/go-controller/pkg/node/base_node_network_controller_dpu.go b/go-controller/pkg/node/base_node_network_controller_dpu.go index 26de1386c1..179a54c148 100644 --- a/go-controller/pkg/node/base_node_network_controller_dpu.go +++ b/go-controller/pkg/node/base_node_network_controller_dpu.go @@ -115,7 +115,7 @@ func (bnnc *BaseNodeNetworkController) watchPodsDPU() (*factory.Handler, error) // add all the Pod's NADs into Pod's nadToDPUCDMap // For default network, NAD name is DefaultNetworkName. 
nadToDPUCDMap := map[string]*util.DPUConnectionDetails{} - if bnnc.IsSecondary() { + if bnnc.IsUserDefinedNetwork() { if bnnc.IsPrimaryNetwork() { activeNetwork, err = bnnc.networkManager.GetActiveNetworkForNamespace(pod.Namespace) if err != nil { diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go index c8b4ac647e..849740e58c 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -82,6 +82,7 @@ type BridgeConfiguration struct { ofPortPhys string netConfig map[string]*BridgeUDNConfiguration eipMarkIPs *egressip.MarkIPsCache + dropGARP bool } func NewBridgeConfiguration(intfName, nodeName, @@ -110,6 +111,16 @@ func NewBridgeConfiguration(intfName, nodeName, } res.netConfig[types.DefaultNetworkName].Advertised.Store(advertised) + // temp workaround for https://issues.redhat.com/browse/FDP-1537 + // we need to ensure we continue dropping GARPs for any new bridge config if the run mode is ovnkube controller + ovnkube node + IC + single zone node + // FIXME: only add if run mode is ovnkube controller + node in single process + if config.OVNKubernetesFeature.EnableEgressIP && config.OVNKubernetesFeature.EnableInterconnect && config.OvnKubeNode.Mode == types.NodeModeFull { + // drop by default - set to false later when ovnkube controller has sync'd and changes propagated to OVN southbound database + // we should also match on run mode here to ensure ovnkube controller + ovnkube node are running in the same process + res.dropGARP = true + } + // end temp work around + if config.Gateway.GatewayAcceleratedInterface != "" { // Try to get representor for the specified gateway device. 
// If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device @@ -477,6 +488,12 @@ func (b *BridgeConfiguration) SetEIPMarkIPs(eipMarkIPs *egressip.MarkIPsCache) { b.eipMarkIPs = eipMarkIPs } +func (b *BridgeConfiguration) SetDropGARP(drop bool) { + b.mutex.Lock() + defer b.mutex.Unlock() + b.dropGARP = drop +} + func gatewayReady(patchPort string) bool { // Get ofport of patchPort ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go index 8395baf06d..d13568a529 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -47,10 +47,10 @@ func CheckUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfigurat var protoPrefix string if net2.IsIPv4CIDR(svcCIDR) { mgmtMasqIP = netConfig.V4MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip" + protoPrefix = protoPrefixV4 } else { mgmtMasqIP = netConfig.V6MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip6" + protoPrefix = protoPrefixV6 } var nFlows int @@ -78,11 +78,11 @@ func CheckAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDN if net2.IsIPv4CIDR(svcCIDR) { matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip" + protoPrefix = protoPrefixV4 } else { matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip6" + protoPrefix = protoPrefixV6 } var nFlows int @@ -107,11 +107,11 @@ func CheckDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *BridgeUDNCo var masqSubnet string var protoPrefix string if net2.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" + protoPrefix = protoPrefixV4 masqIP = 
config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() masqSubnet = config.Gateway.V4MasqueradeSubnet } else { - protoPrefix = "ip6" + protoPrefix = protoPrefixV6 masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() masqSubnet = config.Gateway.V6MasqueradeSubnet } diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go index 8a858c30e9..76bc6ef597 100644 --- a/go-controller/pkg/node/bridgeconfig/bridgeflows.go +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -14,6 +14,11 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) +const ( + protoPrefixV4 = "ip" + protoPrefixV6 = "ipv6" +) + func (b *BridgeConfiguration) DefaultBridgeFlows(hostSubnets []*net.IPNet, extraIPs []net.IP) ([]string, error) { b.mutex.Lock() defer b.mutex.Unlock() @@ -57,6 +62,19 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) } + // Problem: ovn-controller connects to SB DB and then GARPs for any EIPs configured however for IC, SB DB maybe stale if + // ovnkube-controller is not processing. + // Solution: add a logical flow on startup to allow GARPs from Node IPs but drop other GARPs and remove when ovnkube-controller + // has sync'd and changes propagated to OVN SB DB. + // remove when ovn contains native support for logical router ports to contain an option to silence GARPs on startup of ovn-controller. + // https://issues.redhat.com/browse/FDP-1537 + if b.dropGARP { + // priority 499 flows to allow GARP pkts when src IP is a Node IP + dftFlows = append(dftFlows, b.allowNodeIPGARPFlows(extraIPs)...) + // priority 498 flows to drop GARP pkts with no regards to src IP + dftFlows = append(dftFlows, b.dropGARPFlows()...) + } + if config.IPv4Mode { // table0, Geneve packets coming from external. Skip conntrack and go directly to host // if dest mac is the shared mac send directly to host. 
@@ -82,9 +100,10 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string for _, netConfig := range b.patchedNetConfigs() { // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, %s_src=%s,"+ "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV4, protoPrefixV4, + config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), protoPrefixV4, physicalIP.IP, config.Default.HostMasqConntrackZone, physicalIP.IP)) } @@ -105,18 +124,20 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, %s_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV4, + protoPrefixV4, ip.String(), protoPrefixV4, physicalIP.IP, config.Default.HostMasqConntrackZone)) } } // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s,"+ "actions=ct(zone=%d,nat,table=5)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefixV4, protoPrefixV4, + 
config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) } if config.IPv6Mode { if ofPortPhys != "" { @@ -144,9 +165,10 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, %s_src=%s,"+ "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV6, protoPrefixV6, + config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), protoPrefixV6, physicalIP.IP, config.Default.HostMasqConntrackZone, physicalIP.IP)) } @@ -167,18 +189,20 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string for _, netConfig := range b.patchedNetConfigs() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, %s_src=%s,"+ "actions=ct(commit,zone=%d,table=4)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV6, protoPrefixV6, + ip.String(), protoPrefixV6, physicalIP.IP, config.Default.HostMasqConntrackZone)) } } // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s,"+ "actions=ct(zone=%d,nat,table=5)", - nodetypes.DefaultOpenFlowCookie, ofPortHost, 
config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefixV6, protoPrefixV6, + config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) } var protoPrefix, masqIP, masqSubnet string @@ -186,11 +210,11 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // table 0, packets coming from Host -> Service for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { if utilnet.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" + protoPrefix = protoPrefixV4 masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() masqSubnet = config.Gateway.V4MasqueradeSubnet } else { - protoPrefix = "ipv6" + protoPrefix = protoPrefixV6 masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() masqSubnet = config.Gateway.V6MasqueradeSubnet } @@ -287,50 +311,50 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string if config.IPv4Mode { // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + fmt.Sprintf("cookie=%s, priority=100, table=1, %s, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, protoPrefixV4, netConfig.MasqCTMark, actions)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + fmt.Sprintf("cookie=%s, priority=100, table=1, %s, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, protoPrefixV4, netConfig.MasqCTMark, actions)) } if config.IPv6Mode { // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN dftFlows 
= append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + fmt.Sprintf("cookie=%s, priority=100, table=1, %s, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, protoPrefixV6, netConfig.MasqCTMark, actions)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + fmt.Sprintf("cookie=%s, priority=100, table=1, %s, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, protoPrefixV6, netConfig.MasqCTMark, actions)) } } if config.IPv4Mode { // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=100, table=1, %s %s, ct_state=+trk+est, ct_mark=%s, "+ "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + nodetypes.DefaultOpenFlowCookie, match_vlan, protoPrefixV4, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+rel, ct_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=100, table=1, %s %s, ct_state=+trk+rel, ct_mark=%s, "+ "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + nodetypes.DefaultOpenFlowCookie, match_vlan, protoPrefixV4, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) } if config.IPv6Mode { // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ + 
fmt.Sprintf("cookie=%s, priority=100, table=1, %s %s, ct_state=+trk+est, ct_mark=%s, "+ "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + nodetypes.DefaultOpenFlowCookie, match_vlan, protoPrefixV6, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=100, table=1, %s %s, ct_state=+trk+rel, ct_mark=%s, "+ "actions=%soutput:%s", - nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + nodetypes.DefaultOpenFlowCookie, match_vlan, protoPrefixV6, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) } @@ -372,22 +396,23 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // the correct patch port of it's own network where it's a deadend if the clusterIP is not part of // that UDN network and works if it is part of the UDN network. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=200, table=2, %s, %s_src=%s, "+ "actions=drop", - nodetypes.DefaultOpenFlowCookie, matchingIPFamilySubnet.String())) + nodetypes.DefaultOpenFlowCookie, protoPrefixV4, protoPrefixV4, matchingIPFamilySubnet.String())) } // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to // a service in another UDN. 
dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=200, table=2, %s, %s_src=%s, "+ "actions=drop", - nodetypes.DefaultOpenFlowCookie, netConfig.V4MasqIPs.ManagementPort.IP.String())) + nodetypes.DefaultOpenFlowCookie, protoPrefixV4, protoPrefixV4, + netConfig.V4MasqIPs.ManagementPort.IP.String())) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=250, table=2, %s, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, + nodetypes.DefaultOpenFlowCookie, protoPrefixV4, netConfig.PktMark, bridgeMacAddress, netConfig.OfPortPatch)) } } @@ -411,18 +436,20 @@ func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string } dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=200, table=2, %s, %s_src=%s, "+ "actions=drop", - nodetypes.DefaultOpenFlowCookie, matchingIPFamilySubnet.String())) + nodetypes.DefaultOpenFlowCookie, protoPrefixV6, protoPrefixV6, + matchingIPFamilySubnet.String())) } dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=200, table=2, %s, %s_src=%s, "+ "actions=drop", - nodetypes.DefaultOpenFlowCookie, netConfig.V6MasqIPs.ManagementPort.IP.String())) + nodetypes.DefaultOpenFlowCookie, protoPrefixV6, protoPrefixV6, + netConfig.V6MasqIPs.ManagementPort.IP.String())) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ + fmt.Sprintf("cookie=%s, priority=250, table=2, %s, pkt_mark=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, + nodetypes.DefaultOpenFlowCookie, protoPrefixV6, netConfig.PktMark, bridgeMacAddress, netConfig.OfPortPatch)) } } @@ -437,28 +464,28 @@ 
func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string // We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host if config.IPv4Mode { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ip,"+ + fmt.Sprintf("cookie=%s, table=4,%s,"+ "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) + nodetypes.DefaultOpenFlowCookie, protoPrefixV4, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) } if config.IPv6Mode { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ipv6, "+ + fmt.Sprintf("cookie=%s, table=4,%s, "+ "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) + nodetypes.DefaultOpenFlowCookie, protoPrefixV6, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) } // table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2 if config.IPv4Mode { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ip, "+ + fmt.Sprintf("cookie=%s, table=5, %s, "+ "actions=ct(commit,zone=%d,nat,table=2)", - nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, protoPrefixV4, config.Default.HostMasqConntrackZone)) } if config.IPv6Mode { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ipv6, "+ + fmt.Sprintf("cookie=%s, table=5, %s, "+ "actions=ct(commit,zone=%d,nat,table=2)", - nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, protoPrefixV6, config.Default.HostMasqConntrackZone)) } return dftFlows, nil } @@ -478,18 +505,20 @@ func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { flows := make([]string, 0, 2) if 
config.IPv4Mode { flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, %s, nw_frag=yes, actions=ct(table=0,zone=%d)", nodetypes.DefaultOpenFlowCookie, ofPortPhys, + protoPrefixV4, config.Default.ReassemblyConntrackZone, ), ) } if config.IPv6Mode { flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, %s, nw_frag=yes, actions=ct(table=0,zone=%d)", nodetypes.DefaultOpenFlowCookie, ofPortPhys, + protoPrefixV6, config.Default.ReassemblyConntrackZone, ), ) @@ -498,6 +527,26 @@ func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { return flows } +// generateGratuitousARPDropFlow returns a single flow to drop GARPs +// Remove when https://issues.redhat.com/browse/FDP-1537 available +func generateGratuitousARPDropFlow(inPort string, priority int) string { + // set to op code 1 - see rfc5227 particularly section: + // Why Are ARP Announcements Performed Using ARP Request Packets and Not ARP Reply Packets? + // ovn follows this practise of using op code 1 + return fmt.Sprintf("cookie=%s,table=0,priority=%d,in_port=%s,dl_dst=ff:ff:ff:ff:ff:ff,arp,arp_op=1,actions=drop", + nodetypes.GARPCookie, priority, inPort) +} + +// generateGratuitousARPAllowFlow returns a single flow to allow GARP only for a specific source IP. +// Remove when https://issues.redhat.com/browse/FDP-1537 available +func generateGratuitousARPAllowFlow(inPort string, ip net.IP, priority int) string { + // set to op code 1 - see rfc5227 particularly section: + // Why Are ARP Announcements Performed Using ARP Request Packets and Not ARP Reply Packets? 
+ // ovn follows this practise of using op code 1 + return fmt.Sprintf("cookie=%s,table=0,priority=%d,in_port=%s,dl_dst=ff:ff:ff:ff:ff:ff,arp,arp_op=1,arp_spa=%s,actions=output:NORMAL", + nodetypes.GARPCookie, priority, inPort, ip) +} + // must be called with bridge.mutex held func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, error) { // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure @@ -555,10 +604,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // egressService pods will also undergo this SNAT to nodeIP since these features are tied // together at the OVN policy level on the distributed router. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, %s, pkt_mark=%s "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV4, + nodetypes.OvnKubeNodeSNATMark, config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
@@ -567,9 +616,9 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e if netConfig.MasqCTMark != nodetypes.CtMarkOVN { for mark, eip := range b.eipMarkIPs.GetIPv4() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, %s, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV4, mark, config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) } } @@ -579,10 +628,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // so that reverse direction goes back to the pods. if netConfig.IsDefaultNetwork() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, %s, "+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, - netConfig.MasqCTMark, ofPortPhys)) + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV4, + config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) // Allow (a) OVN->host traffic on the same node // (b) host->host traffic on the same node @@ -592,9 +641,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e } else { // for UDN we additionally SNAT the packet from masquerade IP -> node IP dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, %s, %s_src=%s, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), 
output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV4, protoPrefixV4, + netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) } } @@ -602,9 +652,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, packets coming from host Commit connections with ct_mark CtMarkHost // so that reverse direction goes back to the host. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, %s, "+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefixV4, config.Default.ConntrackZone, + nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) } if config.Gateway.Mode == config.GatewayModeLocal { for _, netConfig := range b.patchedNetConfigs() { @@ -632,12 +683,12 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e } if ofPortPhys != "" { - // table 0, packets coming from external or other localnet ports. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. + // table 0, packets coming from external or other localnet ports and destined to OVN or LOCAL. + // Send it through conntrack and resubmit to table 1 to know the state and mark of the connection. // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. 
dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)", - nodetypes.DefaultOpenFlowCookie, config.Default.ConntrackZone)) + fmt.Sprintf("cookie=%s, priority=50, %s, dl_dst=%s, actions=ct(zone=%d, nat, table=1)", + nodetypes.DefaultOpenFlowCookie, protoPrefixV4, bridgeMacAddress, config.Default.ConntrackZone)) } } @@ -654,9 +705,9 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // egressService pods will also undergo this SNAT to nodeIP since these features are tied // together at the OVN policy level on the distributed router. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, %s, pkt_mark=%s "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV6, nodetypes.OvnKubeNodeSNATMark, config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) // table 0, packets coming from egressIP pods only from user defined networks. 
If an egressIP is assigned to @@ -666,9 +717,9 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e if netConfig.MasqCTMark != nodetypes.CtMarkOVN { for mark, eip := range b.eipMarkIPs.GetIPv6() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, %s, pkt_mark=%d, "+ "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV6, mark, config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) } } @@ -678,9 +729,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // so that reverse direction goes back to the pods. if netConfig.IsDefaultNetwork() { dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, %s, "+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV6, + config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) // Allow (a) OVN->host traffic on the same node // (b) host->host traffic on the same node @@ -690,9 +742,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e } else { // for UDN we additionally SNAT the packet from masquerade IP -> node IP dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, %s, %s_src=%s, "+ "actions=ct(commit, zone=%d, nat(src=%s), 
exec(set_field:%s->ct_mark)), output:%s", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, protoPrefixV6, protoPrefixV6, + netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) } } @@ -700,9 +753,10 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, packets coming from host. Commit connections with ct_mark CtMarkHost // so that reverse direction goes back to the host. dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, %s, "+ "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefixV6, + config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) } if config.Gateway.Mode == config.GatewayModeLocal { @@ -710,17 +764,17 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, %s_src=%s, "+ "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV6, physicalIP.IP, config.Default.HostMasqConntrackZone)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, %s_src=%s, "+ "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV6, physicalIP.IP, config.Default.HostMasqConntrackZone)) dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, %s_src=%s, "+ "actions=ct(table=4,zone=%d)", - nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefixV6, physicalIP.IP, config.Default.HostMasqConntrackZone)) if ofPortPhys != "" { // We send BFD traffic coming from OVN to outside directly using a higher priority flow dftFlows = append(dftFlows, @@ -733,8 +787,8 @@ func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, e // table 0, packets coming from external. Send it through conntrack and // resubmit to table 1 to know the state and mark of the connection. 
dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ - "actions=ct(zone=%d, nat, table=1)", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) + fmt.Sprintf("cookie=%s, priority=50, %s, dl_dst=%s, actions=ct(zone=%d, nat, table=1)", + nodetypes.DefaultOpenFlowCookie, protoPrefixV6, bridgeMacAddress, config.Default.ConntrackZone)) } } if ofPortPhys != "" { @@ -910,10 +964,59 @@ func (b *BridgeConfiguration) PMTUDDropFlows(ipAddrs []string) []string { return flows } +// dropGARPFlows generates the ovs flows for dropping gratuitous ARPs for cluster default network traffic only. +// bridgeConfiguration lock must be held by caller +func (b *BridgeConfiguration) dropGARPFlows() []string { + if config.Gateway.Mode != config.GatewayModeShared || !config.IPv4Mode { + return nil + } + const priority = 498 + var flows []string + + defaultNetInfo := util.DefaultNetInfo{} + defaultNetPatchPortName := defaultNetInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) + + for _, netConfig := range b.patchedNetConfigs() { + if netConfig.PatchPort != defaultNetPatchPortName { + continue + } + flows = append(flows, generateGratuitousARPDropFlow(netConfig.OfPortPatch, priority)) + } + return flows +} + +// allowNodeIPGARPFlows generates the OVS flows to allow gratuitous ARPs for Node IP(s) for the cluster default network traffic only. +// bridgeConfiguration lock must be held by caller. 
+// Remove when https://issues.redhat.com/browse/FDP-1537 is available +func (b *BridgeConfiguration) allowNodeIPGARPFlows(nodeIPs []net.IP) []string { + if config.Gateway.Mode != config.GatewayModeShared || !config.IPv4Mode { + return nil + } + const priority = 499 + var flows []string + + defaultNetInfo := util.DefaultNetInfo{} + defaultNetPatchPortName := defaultNetInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) + + for _, netConfig := range b.patchedNetConfigs() { + if netConfig.PatchPort != defaultNetPatchPortName { + continue + } + for _, nodeIP := range nodeIPs { + if nodeIP == nil || nodeIP.IsUnspecified() || utilnet.IsIPv6(nodeIP) { + continue + } + flows = append(flows, generateGratuitousARPAllowFlow(netConfig.OfPortPatch, nodeIP, priority)) + } + + } + return flows +} + func getIPv(ipnet *net.IPNet) string { - prefix := "ip" + prefix := protoPrefixV4 if utilnet.IsIPv6CIDR(ipnet) { - prefix = "ipv6" + prefix = protoPrefixV6 } return prefix } @@ -929,10 +1032,10 @@ func hostNetworkNormalActionFlows(netConfig *BridgeUDNConfiguration, srcMAC stri var ipFamily, ipFamilyDest string if isV6 { - ipFamily = "ipv6" - ipFamilyDest = "ipv6_dst" + ipFamily = protoPrefixV6 + ipFamilyDest = protoPrefixV6 + "_dst" } else { - ipFamily = "ip" + ipFamily = protoPrefixV4 ipFamilyDest = "nw_dst" } diff --git a/go-controller/pkg/node/controllers/egressip/egressip.go b/go-controller/pkg/node/controllers/egressip/egressip.go index be769bc87a..3c6b340bbf 100644 --- a/go-controller/pkg/node/controllers/egressip/egressip.go +++ b/go-controller/pkg/node/controllers/egressip/egressip.go @@ -566,7 +566,7 @@ func (c *Controller) processEIP(eip *eipv1.EgressIP) (*eIPConfig, sets.Set[strin if err != nil { return nil, selectedNamespaces, selectedPods, selectedNamespacesPodIPs, fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) } - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { // EIP for secondary host interfaces is not 
supported for secondary networks continue } @@ -1035,7 +1035,7 @@ func (c *Controller) repairNode() error { if err != nil { return fmt.Errorf("failed to get active network for namespace %s: %v", namespace.Name, err) } - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { // EIP for secondary host interfaces is not supported for secondary networks continue } diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index eb44f638d4..9e0d28b582 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -854,8 +854,8 @@ func portExists(namespace, name string) bool { /** HACK END **/ // Init executes the first steps to start the DefaultNodeNetworkController. -// It is split from Start() and executed before SecondaryNodeNetworkController (SNNC), -// to allow SNNC to reference the openflow manager created in Init. +// It is split from Start() and executed before UserDefinedNodeNetworkController (UDNNC) +// to allow UDNNC to reference the openflow manager created in Init. func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { klog.Infof("Initializing the default node network controller") @@ -1008,10 +1008,14 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { return fmt.Errorf("failed to set node zone annotation for node %s: %w", nc.name, err) } - encapIPList := sets.New[string]() - encapIPList.Insert(strings.Split(config.Default.EffectiveEncapIP, ",")...) - if err := util.SetNodeEncapIPs(nodeAnnotator, encapIPList); err != nil { - return fmt.Errorf("failed to set node-encap-ips annotation for node %s: %w", nc.name, err) + // Set the node-encap-ips annotation with the configured encap IP. + // This encap IP is unavailable on the DPU host mode, so we don't need to set it there. 
+ if config.OvnKubeNode.Mode != types.NodeModeDPUHost { + encapIPList := sets.New[string]() + encapIPList.Insert(strings.Split(config.Default.EffectiveEncapIP, ",")...) + if err := util.SetNodeEncapIPs(nodeAnnotator, encapIPList); err != nil { + return fmt.Errorf("failed to set node-encap-ips annotation for node %s: %w", nc.name, err) + } } if err := nodeAnnotator.Run(); err != nil { diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 35267261f2..97e7baeecb 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -37,6 +37,7 @@ type Gateway interface { GetGatewayIface() string SetDefaultGatewayBridgeMAC(addr net.HardwareAddr) SetDefaultPodNetworkAdvertised(bool) + SetDefaultBridgeGARPDropFlows(bool) Reconcile() error } @@ -482,6 +483,15 @@ func (g *gateway) GetDefaultPodNetworkAdvertised() bool { return g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Load() } +// SetDefaultBridgeGARPDropFlows will enable flows to drop GARPs if the openflow +// manager has been initialized. +func (g *gateway) SetDefaultBridgeGARPDropFlows(isDropped bool) { + if g.openflowManager == nil { + return + } + g.openflowManager.setDefaultBridgeGARPDrop(isDropped) +} + // Reconcile handles triggering updates to different components of a gateway, like OFM, Services func (g *gateway) Reconcile() error { klog.Info("Reconciling gateway with updates") diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index 35e409618b..5d1d94cba6 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -41,6 +41,8 @@ import ( ) const ( + protoPrefixV4 = "ip" + protoPrefixV6 = "ipv6" // etpSvcOpenFlowCookie identifies constant open flow rules added to the host OVS // bridge to move packets between host and external for etp=local traffic. 
// The hex number 0xe745ecf105, represents etp(e74)-service(5ec)-flows which makes it easier for debugging. @@ -400,32 +402,37 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI return utilerrors.Join(errors...) } - ipPrefix := "ip" - if !utilnet.IsIPv4String(service.Spec.ClusterIP) { - ipPrefix = "ipv6" - } - // table 2, user-defined network host -> OVN towards default cluster network services defaultNetConfig := npw.ofm.defaultBridge.GetActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) - // sample flow: cookie=0xdeff105, duration=2319.685s, table=2, n_packets=496, n_bytes=67111, priority=300, - // ip,nw_dst=10.96.0.1 actions=mod_dl_dst:02:42:ac:12:00:03,output:"patch-breth0_ov" - // This flow is used for UDNs and advertised UDNs to be able to reach kapi and dns services alone on default network - flows := []string{fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_dst=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - nodetypes.DefaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, - npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.OfPortPatch)} - if util.IsRouteAdvertisementsEnabled() { - // if the network is advertised, then for the reply from kapi and dns services to go back - // into the UDN's VRF we need flows that statically send this to the local port - // sample flow: cookie=0xdeff105, duration=264.196s, table=0, n_packets=0, n_bytes=0, priority=490,ip, - // in_port="patch-breth0_ov",nw_src=10.96.0.10,actions=ct(table=3,zone=64001,nat) - // this flow is meant to match all advertised UDNs and then the ip rules on the host will take - // this packet into the corresponding UDNs - // NOTE: We chose priority 490 to differentiate this flow from the flow at priority 500 added for the - // non-advertised UDNs reponse for debugging purposes: - // sample flow for non-advertised UDNs: cookie=0xdeff105, duration=684.087s, table=0, n_packets=0, n_bytes=0, - // idle_age=684, 
priority=500,ip,in_port=2,nw_src=10.96.0.0/16,nw_dst=169.254.0.0/17 actions=ct(table=3,zone=64001,nat) - flows = append(flows, fmt.Sprintf("cookie=%s, priority=490, in_port=%s, ip, ip_src=%s,actions=ct(zone=%d,nat,table=3)", - nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) + var flows []string + clusterIPs := util.GetClusterIPs(service) + for _, clusterIP := range clusterIPs { + ipPrefix := protoPrefixV4 + if utilnet.IsIPv6String(clusterIP) { + ipPrefix = protoPrefixV6 + } + // table 2, user-defined network host -> OVN towards default cluster network services + // sample flow: cookie=0xdeff105, duration=2319.685s, table=2, n_packets=496, n_bytes=67111, priority=300, + // ip,nw_dst=10.96.0.1 actions=mod_dl_dst:02:42:ac:12:00:03,output:"patch-breth0_ov" + // This flow is used for UDNs and advertised UDNs to be able to reach kapi and dns services alone on default network + flows = append(flows, fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_dst=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + nodetypes.DefaultOpenFlowCookie, ipPrefix, ipPrefix, clusterIP, + npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.OfPortPatch)) + + if util.IsRouteAdvertisementsEnabled() { + // if the network is advertised, then for the reply from kapi and dns services to go back + // into the UDN's VRF we need flows that statically send this to the local port + // sample flow: cookie=0xdeff105, duration=264.196s, table=0, n_packets=0, n_bytes=0, priority=490,ip, + // in_port="patch-breth0_ov",nw_src=10.96.0.10,actions=ct(table=3,zone=64001,nat) + // this flow is meant to match all advertised UDNs and then the ip rules on the host will take + // this packet into the corresponding UDNs + // NOTE: We chose priority 490 to differentiate this flow from the flow at priority 500 added for the + // non-advertised UDNs reponse for debugging purposes: + // sample flow for non-advertised UDNs: 
cookie=0xdeff105, duration=684.087s, table=0, n_packets=0, n_bytes=0, + // idle_age=684, priority=500,ip,in_port=2,nw_src=10.96.0.0/16,nw_dst=169.254.0.0/17 actions=ct(table=3,zone=64001,nat) + flows = append(flows, fmt.Sprintf("cookie=%s, priority=490, in_port=%s, %s, %s_src=%s,actions=ct(zone=%d,nat,table=3)", + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, ipPrefix, ipPrefix, clusterIP, config.Default.HostMasqConntrackZone)) + } } npw.ofm.updateFlowCacheEntry(key, flows) } diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 4611ea97ed..984a666195 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -101,6 +101,12 @@ func setManagementPortFakeCommands(fexec *ovntest.FakeExec, nodeName string) { Cmd: "ip route replace table 7 172.16.1.0/24 via 100.128.0.1 dev ovn-k8s-mp0", Output: "0", }) + if config.IPv6Mode { + fexec.AddFakeCmd(&ovntest.ExpectedCmd{ + Cmd: "ip route replace table 7 fd02::/112 via ae70::1 dev ovn-k8s-mp0", + Output: "0", + }) + } fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ip -4 rule", Output: "0", @@ -276,6 +282,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { // Restore global default values before each testcase err := config.PrepareTestConfig() Expect(err).NotTo(HaveOccurred()) + + // Set dual-stack service CIDRs directly after PrepareTestConfig + config.Kubernetes.ServiceCIDRs = ovntest.MustParseIPNets("172.16.1.0/24", "fd02::/112") + config.OVNKubernetesFeature.EnableMultiNetwork = true config.OVNKubernetesFeature.EnableNetworkSegmentation = true // Use a larger masq subnet to allow OF manager to allocate IPs for UDNs. 
@@ -653,7 +663,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { &kubeMock, vrf, ipRulesManager, localGw) Expect(err).NotTo(HaveOccurred()) flowMap := udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) + Expect(flowMap["DEFAULT"]).To(HaveLen(50)) Expect(udnGateway.masqCTMark).To(Equal(udnGateway.masqCTMark)) var udnFlows int @@ -671,7 +681,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(flowMap["DEFAULT"]).To(HaveLen(70)) // 18 UDN Flows are added by default Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") @@ -687,7 +697,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } } - Expect(udnFlows).To(Equal(14)) + Expect(udnFlows).To(Equal(16)) openflowManagerCheckPorts(udnGateway.openflowManager) for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { @@ -707,7 +717,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(flowMap["DEFAULT"]).To(HaveLen(50)) // only default network flows are present Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { @@ -885,7 +895,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { &kubeMock, vrf, ipRulesManager, localGw) 
Expect(err).NotTo(HaveOccurred()) flowMap := udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) + Expect(flowMap["DEFAULT"]).To(HaveLen(50)) Expect(udnGateway.masqCTMark).To(Equal(udnGateway.masqCTMark)) var udnFlows int for _, flows := range flowMap { @@ -902,7 +912,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(flowMap["DEFAULT"]).To(HaveLen(70)) // 18 UDN Flows are added by default Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") @@ -918,7 +928,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } } - Expect(udnFlows).To(Equal(14)) + Expect(udnFlows).To(Equal(16)) openflowManagerCheckPorts(udnGateway.openflowManager) for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { @@ -938,7 +948,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(flowMap["DEFAULT"]).To(HaveLen(50)) // only default network flows are present Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { @@ -1125,7 +1135,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { &kubeMock, vrf, ipRulesManager, localGw) Expect(err).NotTo(HaveOccurred()) flowMap := udnGateway.gateway.openflowManager.flowCache - 
Expect(flowMap["DEFAULT"]).To(HaveLen(46)) + Expect(flowMap["DEFAULT"]).To(HaveLen(50)) Expect(udnGateway.masqCTMark).To(Equal(udnGateway.masqCTMark)) var udnFlows int @@ -1143,7 +1153,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(73)) // 18 UDN Flows, 5 advertisedUDN flows, and 2 packet mark flows (IPv4+IPv6) are added by default + Expect(flowMap["DEFAULT"]).To(HaveLen(80)) // 18 UDN Flows, 5 advertisedUDN flows, and 2 packet mark flows (IPv4+IPv6) are added by default Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") @@ -1159,7 +1169,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } } - Expect(udnFlows).To(Equal(16)) + Expect(udnFlows).To(Equal(18)) openflowManagerCheckPorts(udnGateway.openflowManager) for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { @@ -1181,7 +1191,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(flowMap["DEFAULT"]).To(HaveLen(50)) // only default network flows are present Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { @@ -1237,39 +1247,44 @@ var _ = Describe("UserDefinedNetworkGateway", func() { routes, err := udnGateway.computeRoutesForUDN(mplink) Expect(err).NotTo(HaveOccurred()) - Expect(routes).To(HaveLen(9)) - 
Expect(err).NotTo(HaveOccurred()) + Expect(routes).To(HaveLen(10)) + Expect(*routes[0].Dst).To(Equal(*ovntest.MustParseIPNet("172.16.1.0/24"))) // default service subnet Expect(routes[0].LinkIndex).To(Equal(bridgelink.Attrs().Index)) Expect(routes[0].Gw).To(Equal(config.Gateway.MasqueradeIPs.V4DummyNextHopMasqueradeIP)) + + Expect(*routes[1].Dst).To(Equal(*ovntest.MustParseIPNet("fd02::/112"))) // default service subnet + Expect(routes[1].LinkIndex).To(Equal(bridgelink.Attrs().Index)) + Expect(routes[1].Gw).To(Equal(config.Gateway.MasqueradeIPs.V6DummyNextHopMasqueradeIP)) + cidr, err := util.GetIPNetFullMask("169.254.0.16") Expect(err).NotTo(HaveOccurred()) - Expect(*routes[1].Dst).To(Equal(*cidr)) - Expect(routes[1].LinkIndex).To(Equal(mplink.Attrs().Index)) - cidr, err = util.GetIPNetFullMask("fd69::10") - Expect(err).NotTo(HaveOccurred()) Expect(*routes[2].Dst).To(Equal(*cidr)) Expect(routes[2].LinkIndex).To(Equal(mplink.Attrs().Index)) - - // IPv4 ETP=Local service masquerade IP route - Expect(*routes[3].Dst).To(Equal(*ovntest.MustParseIPNet("169.254.169.3/32"))) // ETP=Local svc masq IP + cidr, err = util.GetIPNetFullMask("fd69::10") + Expect(err).NotTo(HaveOccurred()) + Expect(*routes[3].Dst).To(Equal(*cidr)) Expect(routes[3].LinkIndex).To(Equal(mplink.Attrs().Index)) - Expect(routes[3].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) - // IPv4 cluster subnet route - Expect(*routes[4].Dst).To(Equal(*ovntest.MustParseIPNet("100.128.0.0/16"))) // cluster subnet route + // IPv4 ETP=Local service masquerade IP route + Expect(*routes[4].Dst).To(Equal(*ovntest.MustParseIPNet("169.254.169.3/32"))) // ETP=Local svc masq IP Expect(routes[4].LinkIndex).To(Equal(mplink.Attrs().Index)) Expect(routes[4].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) - // IPv6 ETP=Local service masquerade IP route - Expect(*routes[5].Dst).To(Equal(*ovntest.MustParseIPNet("fd69::3/128"))) // ETP=Local svc masq IP + // IPv4 cluster subnet route + 
Expect(*routes[5].Dst).To(Equal(*ovntest.MustParseIPNet("100.128.0.0/16"))) // cluster subnet route Expect(routes[5].LinkIndex).To(Equal(mplink.Attrs().Index)) - Expect(routes[5].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) + Expect(routes[5].Gw.Equal(ovntest.MustParseIP("100.128.0.1"))).To(BeTrue()) - // IPv6 cluster subnet route - Expect(*routes[6].Dst).To(Equal(*ovntest.MustParseIPNet("ae70::/60"))) // cluster subnet route + // IPv6 ETP=Local service masquerade IP route + Expect(*routes[6].Dst).To(Equal(*ovntest.MustParseIPNet("fd69::3/128"))) // ETP=Local svc masq IP Expect(routes[6].LinkIndex).To(Equal(mplink.Attrs().Index)) Expect(routes[6].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) + + // IPv6 cluster subnet route + Expect(*routes[7].Dst).To(Equal(*ovntest.MustParseIPNet("ae70::/60"))) // cluster subnet route + Expect(routes[7].LinkIndex).To(Equal(mplink.Attrs().Index)) + Expect(routes[7].Gw.Equal(ovntest.MustParseIP("ae70::1"))).To(BeTrue()) return nil }) Expect(err).NotTo(HaveOccurred()) @@ -1394,11 +1409,11 @@ var _ = Describe("UserDefinedNetworkGateway", func() { routes, err := udnGateway.computeRoutesForUDN(mplink) Expect(err).NotTo(HaveOccurred()) - Expect(routes).To(HaveLen(10)) + Expect(routes).To(HaveLen(11)) Expect(err).NotTo(HaveOccurred()) - Expect(*routes[1].Dst).To(Equal(*ovntest.MustParseIPNet("0.0.0.0/0"))) - Expect(routes[1].LinkIndex).To(Equal(bridgelink.Attrs().Index)) - Expect(routes[1].Gw.Equal(ovntest.MustParseIP(config.Gateway.NextHop))).To(BeTrue()) + Expect(*routes[2].Dst).To(Equal(*ovntest.MustParseIPNet("0.0.0.0/0"))) + Expect(routes[2].LinkIndex).To(Equal(bridgelink.Attrs().Index)) + Expect(routes[2].Gw.Equal(ovntest.MustParseIP(config.Gateway.NextHop))).To(BeTrue()) return nil }) Expect(err).NotTo(HaveOccurred()) @@ -1437,7 +1452,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { routes, err := udnGateway.computeRoutesForUDN(mplink) Expect(err).NotTo(HaveOccurred()) - Expect(routes).To(HaveLen(9)) + 
Expect(routes).To(HaveLen(10)) Expect(err).NotTo(HaveOccurred()) Expect(*routes[1].Dst).To(Not(Equal(*ovntest.MustParseIPNet("0.0.0.0/0")))) Expect(routes[1].Gw.Equal(ovntest.MustParseIP(config.Gateway.NextHop))).To(BeFalse()) diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go index dcbbbfc7d6..dda4e69da0 100644 --- a/go-controller/pkg/node/node_ip_handler_linux.go +++ b/go-controller/pkg/node/node_ip_handler_linux.go @@ -243,7 +243,7 @@ func (c *addressManager) handleNodePrimaryAddrChange() { klog.Errorf("Address Manager failed to check node primary address change: %v", err) return } - if nodePrimaryAddrChanged && config.Default.EncapIP == "" { + if nodePrimaryAddrChanged && config.Default.EncapIP == "" && config.OvnKubeNode.Mode != types.NodeModeDPUHost { klog.Infof("Node primary address changed to %v. Updating OVN encap IP.", c.nodePrimaryAddr) c.updateOVNEncapIPAndReconnect(c.nodePrimaryAddr) } diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 47a397766b..b55fff21cd 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -78,6 +78,12 @@ func (c *openflowManager) setDefaultBridgeMAC(macAddr net.HardwareAddr) { c.defaultBridge.SetMAC(macAddr) } +// setDefaultBridgeGARPDrop is used to enable or disable whether openflow manager generates ovs flows and adds them to +// the default ext bridge to drop GARP +func (c *openflowManager) setDefaultBridgeGARPDrop(isDropped bool) { + c.defaultBridge.SetDropGARP(isDropped) +} + func (c *openflowManager) updateFlowCacheEntry(key string, flows []string) { c.flowMutex.Lock() defer c.flowMutex.Unlock() @@ -193,6 +199,9 @@ func (c *openflowManager) Run(stopChan <-chan struct{}, doneWg *sync.WaitGroup) c.syncFlows() timer.Reset(syncPeriod) case <-stopChan: + // sync before shutting down because flows maybe added, and theres a race between flow channel (req sync) + 
// and stop chan on shutdown. ensure flows are sync before shut down + c.syncFlows() return } } diff --git a/go-controller/pkg/node/routemanager/route_manager.go b/go-controller/pkg/node/routemanager/route_manager.go index e3480e4ca8..d5f40e269a 100644 --- a/go-controller/pkg/node/routemanager/route_manager.go +++ b/go-controller/pkg/node/routemanager/route_manager.go @@ -351,7 +351,9 @@ func routePartiallyEqualWantedToExisting(w, e *netlink.Route) bool { equalOrLeftZero(w.InitRwnd, e.InitRwnd, z.InitRwnd) && equalOrLeftZero(w.QuickACK, e.QuickACK, z.QuickACK) && equalOrLeftZero(w.Congctl, e.Congctl, z.Congctl) && - equalOrLeftZero(w.FastOpenNoCookie, e.FastOpenNoCookie, z.FastOpenNoCookie) + equalOrLeftZero(w.FastOpenNoCookie, e.FastOpenNoCookie, z.FastOpenNoCookie) && + equalOrLeftZero(w.MTULock, e.MTULock, z.MTULock) && + equalOrLeftZero(w.RtoMinLock, e.RtoMinLock, z.RtoMinLock) } func isRouteNotFoundError(err error) bool { diff --git a/go-controller/pkg/node/routemanager/route_manager_test.go b/go-controller/pkg/node/routemanager/route_manager_test.go index d090a6dbe2..999be53693 100644 --- a/go-controller/pkg/node/routemanager/route_manager_test.go +++ b/go-controller/pkg/node/routemanager/route_manager_test.go @@ -369,6 +369,7 @@ var _ = ginkgo.Describe("Route Manager", func() { "Destination": &netlink.Via{Addr: ovntest.MustParseIP("10.0.0.0")}, "Encap": &netlink.IP6tnlEncap{Src: ovntest.MustParseIP("10.0.0.0")}, "string": "test", + "bool": true, } keys := map[string]bool{ "Dst": true, diff --git a/go-controller/pkg/node/types/const.go b/go-controller/pkg/node/types/const.go index bdf9c388bf..a07f4b166f 100644 --- a/go-controller/pkg/node/types/const.go +++ b/go-controller/pkg/node/types/const.go @@ -17,6 +17,10 @@ const ( // PmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, // fragmentation-needed (4) PmtudOpenFlowCookie = "0x0304" + // GARPCookie identifies the flows used to allow node IPs and drop other GARPs from 
CDN. + // Temp workaround until OVN has native supported for silencing GARPs on startup. + // https://issues.redhat.com/browse/FDP-1537 + GARPCookie = "0x0305" // CtMarkHost is the conntrack mark value for host traffic CtMarkHost = "0x2" ) diff --git a/go-controller/pkg/node/secondary_node_network_controller.go b/go-controller/pkg/node/user_defined_node_network_controller.go similarity index 78% rename from go-controller/pkg/node/secondary_node_network_controller.go rename to go-controller/pkg/node/user_defined_node_network_controller.go index e5c4eba83f..4b814c4253 100644 --- a/go-controller/pkg/node/secondary_node_network_controller.go +++ b/go-controller/pkg/node/user_defined_node_network_controller.go @@ -16,9 +16,9 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -// SecondaryNodeNetworkController structure is the object which holds the controls for starting -// and reacting upon the watched resources (e.g. pods, endpoints) for secondary network -type SecondaryNodeNetworkController struct { +// UserDefinedNodeNetworkController structure is the object which holds the controls for starting +// and reacting upon the watched resources (e.g. pods, endpoints) for user-defined networks +type UserDefinedNodeNetworkController struct { BaseNodeNetworkController // pod events factory handler podHandler *factory.Handler @@ -26,19 +26,19 @@ type SecondaryNodeNetworkController struct { gateway *UserDefinedNetworkGateway } -// NewSecondaryNodeNetworkController creates a new OVN controller for creating logical network +// NewUserDefinedNodeNetworkController creates a new OVN controller for creating logical network // infrastructure and policy for the given secondary network. It supports layer3, layer2 and // localnet topology types. 
-func NewSecondaryNodeNetworkController( +func NewUserDefinedNodeNetworkController( cnnci *CommonNodeNetworkControllerInfo, netInfo util.NetInfo, networkManager networkmanager.Interface, vrfManager *vrfmanager.Controller, ruleManager *iprulemanager.Controller, defaultNetworkGateway Gateway, -) (*SecondaryNodeNetworkController, error) { +) (*UserDefinedNodeNetworkController, error) { - snnc := &SecondaryNodeNetworkController{ + snnc := &UserDefinedNodeNetworkController{ BaseNodeNetworkController: BaseNodeNetworkController{ CommonNodeNetworkControllerInfo: *cnnci, ReconcilableNetInfo: util.NewReconcilableNetInfo(netInfo), @@ -64,10 +64,10 @@ func NewSecondaryNodeNetworkController( } // Start starts the default controller; handles all events and creates all needed logical entities -func (nc *SecondaryNodeNetworkController) Start(_ context.Context) error { - klog.Infof("Start secondary node network controller of network %s", nc.GetNetworkName()) +func (nc *UserDefinedNodeNetworkController) Start(_ context.Context) error { + klog.Infof("Starting UDN node network controller for network %s", nc.GetNetworkName()) - // enable adding ovs ports for dpu pods in both primary and secondary user defined networks + // enable adding ovs ports for dpu pods in both primary and secondary user-defined networks if (config.OVNKubernetesFeature.EnableMultiNetwork || util.IsNetworkSegmentationSupportEnabled()) && config.OvnKubeNode.Mode == types.NodeModeDPU { handler, err := nc.watchPodsDPU() if err != nil { @@ -85,8 +85,8 @@ func (nc *SecondaryNodeNetworkController) Start(_ context.Context) error { } // Stop gracefully stops the controller -func (nc *SecondaryNodeNetworkController) Stop() { - klog.Infof("Stop secondary node network controller of network %s", nc.GetNetworkName()) +func (nc *UserDefinedNodeNetworkController) Stop() { + klog.Infof("Stopping UDN node network controller for network %s", nc.GetNetworkName()) close(nc.stopChan) nc.wg.Wait() @@ -95,15 +95,15 @@ func (nc 
*SecondaryNodeNetworkController) Stop() { } } -// Cleanup cleans up node entities for the given secondary network -func (nc *SecondaryNodeNetworkController) Cleanup() error { +// Cleanup cleans up node entities for the given user-defined network +func (nc *UserDefinedNodeNetworkController) Cleanup() error { if nc.gateway != nil { return nc.gateway.DelNetwork() } return nil } -func (nc *SecondaryNodeNetworkController) shouldReconcileNetworkChange(old, new util.NetInfo) bool { +func (nc *UserDefinedNodeNetworkController) shouldReconcileNetworkChange(old, new util.NetInfo) bool { wasUDNNetworkAdvertisedAtNode := util.IsPodNetworkAdvertisedAtNode(old, nc.name) isUDNNetworkAdvertisedAtNode := util.IsPodNetworkAdvertisedAtNode(new, nc.name) return wasUDNNetworkAdvertisedAtNode != isUDNNetworkAdvertisedAtNode @@ -113,7 +113,7 @@ func (nc *SecondaryNodeNetworkController) shouldReconcileNetworkChange(old, new // and the gateway mode: // 1. IP rules // 2. OpenFlows on br-ex bridge to forward traffic to correct ofports -func (nc *SecondaryNodeNetworkController) Reconcile(netInfo util.NetInfo) error { +func (nc *UserDefinedNodeNetworkController) Reconcile(netInfo util.NetInfo) error { reconcilePodNetwork := nc.shouldReconcileNetworkChange(nc.ReconcilableNetInfo, netInfo) err := util.ReconcileNetInfo(nc.ReconcilableNetInfo, netInfo) diff --git a/go-controller/pkg/node/secondary_node_network_controller_test.go b/go-controller/pkg/node/user_defined_node_network_controller_test.go similarity index 95% rename from go-controller/pkg/node/secondary_node_network_controller_test.go rename to go-controller/pkg/node/user_defined_node_network_controller_test.go index bd0fbaab09..af6a7a9018 100644 --- a/go-controller/pkg/node/secondary_node_network_controller_test.go +++ b/go-controller/pkg/node/user_defined_node_network_controller_test.go @@ -41,7 +41,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("SecondaryNodeNetworkController", func() { +var _ = Describe("UserDefinedNodeNetworkController", func() { var ( networkID = "3" nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", @@ -85,7 +85,7 @@ var _ = Describe("SecondaryNodeNetworkController", func() { factoryMock.On("GetNodes").Return(nodeList, nil) NetInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) - controller, err := NewSecondaryNodeNetworkController(&cnnci, NetInfo, nil, nil, nil, &gateway{}) + controller, err := NewUserDefinedNodeNetworkController(&cnnci, NetInfo, nil, nil, nil, &gateway{}) Expect(err).NotTo(HaveOccurred()) err = controller.Start(context.Background()) Expect(err).NotTo(HaveOccurred()) @@ -116,7 +116,7 @@ var _ = Describe("SecondaryNodeNetworkController", func() { Expect(err).NotTo(HaveOccurred()) getCreationFakeCommands(fexec, "ovn-k8s-mp3", mgtPortMAC, NetInfo.GetNetworkName(), "worker1", NetInfo.MTU()) ofm := getDummyOpenflowManager() - controller, err := NewSecondaryNodeNetworkController(&cnnci, NetInfo, nil, nil, nil, &gateway{openflowManager: ofm}) + controller, err := NewUserDefinedNodeNetworkController(&cnnci, NetInfo, nil, nil, nil, &gateway{openflowManager: ofm}) Expect(err).NotTo(HaveOccurred()) err = controller.Start(context.Background()) Expect(err).To(HaveOccurred()) // we don't have the gateway pieces setup so its expected to fail here @@ -144,7 +144,7 @@ var _ = Describe("SecondaryNodeNetworkController", func() { types.Layer3Topology, "100.128.0.0/16", types.NetworkRoleSecondary) NetInfo, err := util.ParseNADInfo(nad) Expect(err).NotTo(HaveOccurred()) - controller, err := NewSecondaryNodeNetworkController(&cnnci, NetInfo, nil, nil, nil, &gateway{}) + controller, err := NewUserDefinedNodeNetworkController(&cnnci, NetInfo, nil, nil, nil, &gateway{}) Expect(err).NotTo(HaveOccurred()) err = controller.Start(context.Background()) Expect(err).NotTo(HaveOccurred()) @@ -152,7 +152,7 @@ var _ = 
Describe("SecondaryNodeNetworkController", func() { }) }) -var _ = Describe("SecondaryNodeNetworkController: UserDefinedPrimaryNetwork Gateway functionality", func() { +var _ = Describe("UserDefinedNodeNetworkController: UserDefinedPrimaryNetwork Gateway functionality", func() { var ( nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) @@ -166,7 +166,7 @@ var _ = Describe("SecondaryNodeNetworkController: UserDefinedPrimaryNetwork Gate routeManager *routemanager.Controller ipRulesManager *iprulemanager.Controller v4NodeSubnet = "100.128.0.0/24" - v6NodeSubnet = "ae70::66/112" + v6NodeSubnet = "ae70::/112" mgtPort = fmt.Sprintf("%s%d", types.K8sMgmtIntfNamePrefix, netID) gatewayInterface = "eth0" gatewayBridge = "breth0" @@ -270,6 +270,7 @@ var _ = Describe("SecondaryNodeNetworkController: UserDefinedPrimaryNetwork Gate config.IPv6Mode = true config.IPv4Mode = true config.Gateway.NodeportEnable = true + config.Kubernetes.ServiceCIDRs = ovntest.MustParseIPNets("172.16.1.0/24", "fd02::/112") ifAddrs := ovntest.MustParseIPNets(v4NodeIP, v6NodeIP) By("creating necessary mocks") @@ -418,15 +419,15 @@ var _ = Describe("SecondaryNodeNetworkController: UserDefinedPrimaryNetwork Gate Expect(err).NotTo(HaveOccurred()) localGw.openflowManager.syncFlows() - By("creating secondary network controller for user defined primary network") + By("creating a UDN controller for user-defined primary network") cnnci := CommonNodeNetworkControllerInfo{name: nodeName, watchFactory: &factoryMock} - controller, err := NewSecondaryNodeNetworkController(&cnnci, NetInfo, nil, vrf, ipRulesManager, localGw) + controller, err := NewUserDefinedNodeNetworkController(&cnnci, NetInfo, nil, vrf, ipRulesManager, localGw) Expect(err).NotTo(HaveOccurred()) Expect(controller.gateway).To(Not(BeNil())) Expect(controller.gateway.ruleManager).To(Not(BeNil())) controller.gateway.kubeInterface = &kubeMock - By("starting secondary network 
controller for user defined primary network") + By("starting UDN controller for user-defined primary network") err = controller.Start(context.Background()) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 491899184e..ea72526b10 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -283,9 +283,9 @@ func (oc *BaseNetworkController) doReconcile(reconcileRoutes, reconcilePendingPo } } -// BaseSecondaryNetworkController structure holds per-network fields and network specific -// configuration for secondary network controller -type BaseSecondaryNetworkController struct { +// BaseUserDefinedNetworkController structure holds per-network fields and network specific +// configuration for UDN controller +type BaseUserDefinedNetworkController struct { BaseNetworkController // network policy events factory handler @@ -294,7 +294,7 @@ type BaseSecondaryNetworkController struct { multiNetPolicyHandler *factory.Handler } -func (oc *BaseSecondaryNetworkController) FilterOutResource(objType reflect.Type, obj interface{}) bool { +func (oc *BaseUserDefinedNetworkController) FilterOutResource(objType reflect.Type, obj interface{}) bool { switch objType { case factory.NamespaceType: ns, ok := obj.(*corev1.Namespace) @@ -343,19 +343,19 @@ func NewCommonNetworkControllerInfo(client clientset.Interface, kube *kube.KubeO } func (bnc *BaseNetworkController) GetLogicalPortName(pod *corev1.Pod, nadName string) string { - if !bnc.IsSecondary() { + if !bnc.IsUserDefinedNetwork() { return util.GetLogicalPortName(pod.Namespace, pod.Name) } else { - return util.GetSecondaryNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) + return util.GetUserDefinedNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) } } func (bnc *BaseNetworkController) AddConfigDurationRecord(kind, namespace, name string) ( []ovsdb.Operation, func(), 
time.Time, error) { - if !bnc.IsSecondary() { + if !bnc.IsUserDefinedNetwork() { return recorders.GetConfigDurationRecorder().AddOVN(bnc.nbClient, kind, namespace, name) } - // TBD: no op for secondary network for now + // TBD: no-op for UDN for now return []ovsdb.Operation{}, func() {}, time.Time{}, nil } @@ -799,7 +799,7 @@ func (bnc *BaseNetworkController) syncNodeManagementPort(node *corev1.Node, swit IPPrefix: hostSubnet.String(), Nexthop: mgmtIfAddr.IP.String(), } - if bnc.IsSecondary() { + if bnc.IsUserDefinedNetwork() { lrsr.ExternalIDs = map[string]string{ types.NetworkExternalID: bnc.GetNetworkName(), types.TopologyExternalID: bnc.TopologyType(), @@ -878,8 +878,8 @@ func (bnc *BaseNetworkController) WatchNodes() error { } func (bnc *BaseNetworkController) recordNodeErrorEvent(node *corev1.Node, nodeErr error) { - if bnc.IsSecondary() { - // TBD, no op for secondary network for now + if bnc.IsUserDefinedNetwork() { + // TBD, noop for UDN for now return } nodeRef, err := ref.GetReference(scheme.Scheme, node) @@ -908,7 +908,7 @@ func (bnc *BaseNetworkController) doesNetworkRequireIPAM() bool { } func (bnc *BaseNetworkController) getPodNADNames(pod *corev1.Pod) []string { - if !bnc.IsSecondary() { + if !bnc.IsUserDefinedNetwork() { return []string{types.DefaultNetworkName} } podNadNames, _ := util.PodNadNames(pod, bnc.GetNetInfo()) @@ -1000,8 +1000,8 @@ func (bnc *BaseNetworkController) nodeZoneClusterChanged(oldNode, newNode *corev } func (bnc *BaseNetworkController) findMigratablePodIPsForSubnets(subnets []*net.IPNet) ([]*net.IPNet, error) { - // live migration is not supported in combination with secondary networks - if bnc.IsSecondary() { + // live migration is not supported in combination with UDNs + if bnc.IsUserDefinedNetwork() { return nil, nil } diff --git a/go-controller/pkg/ovn/base_network_controller_multicast.go b/go-controller/pkg/ovn/base_network_controller_multicast.go index 6f413177d5..0bfd7e3764 100644 --- 
a/go-controller/pkg/ovn/base_network_controller_multicast.go +++ b/go-controller/pkg/ovn/base_network_controller_multicast.go @@ -199,7 +199,7 @@ func (bnc *BaseNetworkController) createDefaultDenyMulticastPolicy() error { return err } - if !bnc.IsSecondary() { + if !bnc.IsUserDefinedNetwork() { // Remove old multicastDefaultDeny port group now that all ports // have been added to the clusterPortGroup by WatchPods() ops, err = libovsdbops.DeletePortGroupsOps(bnc.nbClient, ops, legacyMulticastDefaultDenyPortGroup) diff --git a/go-controller/pkg/ovn/base_network_controller_multipolicy.go b/go-controller/pkg/ovn/base_network_controller_multipolicy.go index d6ab0d984f..1c22ff0fb9 100644 --- a/go-controller/pkg/ovn/base_network_controller_multipolicy.go +++ b/go-controller/pkg/ovn/base_network_controller_multipolicy.go @@ -14,7 +14,7 @@ import ( const PolicyForAnnotation = "k8s.v1.cni.cncf.io/policy-for" -func (bsnc *BaseSecondaryNetworkController) syncMultiNetworkPolicies(multiPolicies []interface{}) error { +func (bsnc *BaseUserDefinedNetworkController) syncMultiNetworkPolicies(multiPolicies []interface{}) error { expectedPolicies := make(map[string]map[string]bool) for _, npInterface := range multiPolicies { policy, ok := npInterface.(*mnpapi.MultiNetworkPolicy) @@ -38,7 +38,7 @@ func (bsnc *BaseSecondaryNetworkController) syncMultiNetworkPolicies(multiPolici return bsnc.syncNetworkPoliciesCommon(expectedPolicies) } -func (bsnc *BaseSecondaryNetworkController) shouldApplyMultiPolicy(mpolicy *mnpapi.MultiNetworkPolicy) bool { +func (bsnc *BaseUserDefinedNetworkController) shouldApplyMultiPolicy(mpolicy *mnpapi.MultiNetworkPolicy) bool { policyForAnnot, ok := mpolicy.Annotations[PolicyForAnnotation] if !ok { klog.V(5).Infof("%s annotation not defined in multi-policy %s/%s", PolicyForAnnotation, diff --git a/go-controller/pkg/ovn/base_network_controller_namespace.go b/go-controller/pkg/ovn/base_network_controller_namespace.go index 7bc86f0bab..f980cd35b7 100644 --- 
a/go-controller/pkg/ovn/base_network_controller_namespace.go +++ b/go-controller/pkg/ovn/base_network_controller_namespace.go @@ -75,7 +75,7 @@ func (bnc *BaseNetworkController) shouldWatchNamespaces() bool { // - The network is secondary, and multi NetworkPolicies are enabled. return bnc.IsDefault() || bnc.IsPrimaryNetwork() && util.IsNetworkSegmentationSupportEnabled() || - bnc.IsSecondary() && util.IsMultiNetworkPoliciesSupportEnabled() + bnc.IsUserDefinedNetwork() && util.IsMultiNetworkPoliciesSupportEnabled() } // WatchNamespaces starts the watching of namespace resource and calls @@ -466,7 +466,7 @@ func (bsnc *BaseNetworkController) removeRemoteZonePodFromNamespaceAddressSet(po // tracked within the zone, nodeName will be empty which will force // canReleasePodIPs to lookup all nodes. nodeName := pod.Spec.NodeName - if !bsnc.IsSecondary() && kubevirt.IsPodLiveMigratable(pod) { + if !bsnc.IsUserDefinedNetwork() && kubevirt.IsPodLiveMigratable(pod) { nodeName, _ = bsnc.lsManager.GetSubnetName(podIfAddrs) } diff --git a/go-controller/pkg/ovn/base_network_controller_pods.go b/go-controller/pkg/ovn/base_network_controller_pods.go index 4427b17b63..e3abb1550c 100644 --- a/go-controller/pkg/ovn/base_network_controller_pods.go +++ b/go-controller/pkg/ovn/base_network_controller_pods.go @@ -105,7 +105,7 @@ func (bnc *BaseNetworkController) deleteStaleLogicalSwitchPorts(expectedLogicalP // get all switches that Pod logical port would be reside on. 
topoType := bnc.TopologyType() - if !bnc.IsSecondary() || topoType == ovntypes.Layer3Topology { + if !bnc.IsUserDefinedNetwork() || topoType == ovntypes.Layer3Topology { // for default network and layer3 topology type networks, get all local zone node switches nodes, err := bnc.GetLocalZoneNodes() if err != nil { @@ -404,7 +404,7 @@ func (bnc *BaseNetworkController) podExpectedInLogicalCache(pod *corev1.Pod) boo func (bnc *BaseNetworkController) getExpectedSwitchName(pod *corev1.Pod) (string, error) { switchName := pod.Spec.NodeName - if bnc.IsSecondary() { + if bnc.IsUserDefinedNetwork() { topoType := bnc.TopologyType() switch topoType { case ovntypes.Layer3Topology: @@ -552,14 +552,14 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *corev1.Pod, nadNa } // Although we have different code to allocate the pod annotation for the - // default network and secondary networks, at the time of this writing they + // default network and user-defined networks, at the time of this writing they // are functionally equivalent and the only reason to keep them separated is - // to make sure the secondary network code has no bugs before we switch to + // to make sure the UDN code has no bugs before we switch to // it for the default network as well. If at all possible, keep them // functionally equivalent going forward. 
var annotationUpdated bool - if bnc.IsSecondary() { - podAnnotation, annotationUpdated, err = bnc.allocatePodAnnotationForSecondaryNetwork(pod, existingLSP, nadName, network, networkRole) + if bnc.IsUserDefinedNetwork() { + podAnnotation, annotationUpdated, err = bnc.allocatePodAnnotationForUserDefinedNetwork(pod, existingLSP, nadName, network, networkRole) } else { podAnnotation, annotationUpdated, err = bnc.allocatePodAnnotation(pod, existingLSP, podDesc, nadName, network, networkRole) } @@ -590,7 +590,7 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *corev1.Pod, nadNa // add external ids lsp.ExternalIDs = map[string]string{"namespace": pod.Namespace, "pod": "true"} - if bnc.IsSecondary() { + if bnc.IsUserDefinedNetwork() { lsp.ExternalIDs[ovntypes.NetworkExternalID] = bnc.GetNetworkName() lsp.ExternalIDs[ovntypes.NADExternalID] = nadName lsp.ExternalIDs[ovntypes.TopologyExternalID] = bnc.TopologyType() @@ -708,7 +708,7 @@ func (bnc *BaseNetworkController) delLSPOps(logicalPort, switchName, } func (bnc *BaseNetworkController) deletePodFromNamespace(ns string, podIfAddrs []*net.IPNet, portUUID string) ([]ovsdb.Operation, error) { - // for secondary network, namespace may be not managed + // for UDN, namespace may be not managed nsInfo, nsUnlock := bnc.getNamespaceLocked(ns, true) if nsInfo == nil { return nil, nil @@ -935,9 +935,9 @@ func (bnc *BaseNetworkController) allocatePodAnnotation(pod *corev1.Pod, existin return podAnnotation, true, nil } -// allocatePodAnnotationForSecondaryNetwork and update the corresponding pod +// allocatePodAnnotationForUserDefinedNetwork and update the corresponding pod // annotation. 
-func (bnc *BaseNetworkController) allocatePodAnnotationForSecondaryNetwork(pod *corev1.Pod, lsp *nbdb.LogicalSwitchPort, +func (bnc *BaseNetworkController) allocatePodAnnotationForUserDefinedNetwork(pod *corev1.Pod, lsp *nbdb.LogicalSwitchPort, nadName string, network *nadapi.NetworkSelectionElement, networkRole string) (*util.PodAnnotation, bool, error) { switchName, err := bnc.getExpectedSwitchName(pod) if err != nil { @@ -1033,7 +1033,7 @@ func (bnc *BaseNetworkController) allocatesPodAnnotation() bool { func (bnc *BaseNetworkController) shouldReleaseDeletedPod(pod *corev1.Pod, switchName, nad string, podIfAddrs []*net.IPNet) (bool, error) { var err error var isMigratedSourcePodStale bool - if !bnc.IsSecondary() { + if !bnc.IsUserDefinedNetwork() { isMigratedSourcePodStale, err = kubevirt.IsMigratedSourcePodStale(bnc.watchFactory, pod) if err != nil { return false, err @@ -1065,7 +1065,7 @@ func (bnc *BaseNetworkController) shouldReleaseDeletedPod(pod *corev1.Pod, switc // tracked within the zone, nodeName will be empty which will force // canReleasePodIPs to lookup all nodes. 
nodeName := pod.Spec.NodeName - if !bnc.IsSecondary() && kubevirt.IsPodLiveMigratable(pod) { + if !bnc.IsUserDefinedNetwork() && kubevirt.IsPodLiveMigratable(pod) { nodeName, _ = bnc.lsManager.GetSubnetName(podIfAddrs) } @@ -1078,7 +1078,7 @@ func (bnc *BaseNetworkController) shouldReleaseDeletedPod(pod *corev1.Pod, switc } var shouldRelease bool - // for secondary network IPs allocated from cluster manager, we will check + // for user-defined network IPs allocated from cluster manager, we will check // if other pods are using the same IPs just in case we are processing // events in different order than cluster manager did (best effort, there // can still be issues with this) diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index 3be9b444f8..8ee14de88a 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -786,7 +786,7 @@ func (bnc *BaseNetworkController) denyPGDeletePorts(np *networkPolicy, portNames // handleLocalPodSelectorAddFunc adds a new pod to an existing NetworkPolicy, should be retriable. 
func (bnc *BaseNetworkController) handleLocalPodSelectorAddFunc(np *networkPolicy, objs ...interface{}) error { - if !bnc.IsSecondary() && config.Metrics.EnableScaleMetrics { + if !bnc.IsUserDefinedNetwork() && config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { duration := time.Since(start) @@ -832,7 +832,7 @@ func (bnc *BaseNetworkController) handleLocalPodSelectorAddFunc(np *networkPolic // handleLocalPodSelectorDelFunc handles delete event for local pod, should be retriable func (bnc *BaseNetworkController) handleLocalPodSelectorDelFunc(np *networkPolicy, objs ...interface{}) error { - if !bnc.IsSecondary() && config.Metrics.EnableScaleMetrics { + if !bnc.IsUserDefinedNetwork() && config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { duration := time.Since(start) @@ -1177,7 +1177,7 @@ func (bnc *BaseNetworkController) setupGressPolicy(np *networkPolicy, gp *gressP // if addNetworkPolicy fails, create or delete operation can be retried func (bnc *BaseNetworkController) addNetworkPolicy(policy *knet.NetworkPolicy) error { klog.Infof("Adding network policy %s for network %s", getPolicyKey(policy), bnc.GetNetworkName()) - if !bnc.IsSecondary() && config.Metrics.EnableScaleMetrics { + if !bnc.IsUserDefinedNetwork() && config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { duration := time.Since(start) @@ -1384,7 +1384,7 @@ type NetworkPolicyExtraParameters struct { } func (bnc *BaseNetworkController) handlePeerNamespaceSelectorAdd(np *networkPolicy, gp *gressPolicy, objs ...interface{}) error { - if !bnc.IsSecondary() && config.Metrics.EnableScaleMetrics { + if !bnc.IsUserDefinedNetwork() && config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { duration := time.Since(start) @@ -1421,7 +1421,7 @@ func (bnc *BaseNetworkController) handlePeerNamespaceSelectorAdd(np *networkPoli } func (bnc *BaseNetworkController) handlePeerNamespaceSelectorDel(np *networkPolicy, gp *gressPolicy, objs ...interface{}) 
error { - if !bnc.IsSecondary() && config.Metrics.EnableScaleMetrics { + if !bnc.IsUserDefinedNetwork() && config.Metrics.EnableScaleMetrics { start := time.Now() defer func() { duration := time.Since(start) diff --git a/go-controller/pkg/ovn/base_network_controller_secondary.go b/go-controller/pkg/ovn/base_network_controller_user_defined.go similarity index 87% rename from go-controller/pkg/ovn/base_network_controller_secondary.go rename to go-controller/pkg/ovn/base_network_controller_user_defined.go index 106155b76e..da26a58350 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary.go +++ b/go-controller/pkg/ovn/base_network_controller_user_defined.go @@ -36,7 +36,7 @@ import ( utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) -func (bsnc *BaseSecondaryNetworkController) getPortInfoForSecondaryNetwork(pod *corev1.Pod) map[string]*lpInfo { +func (bsnc *BaseUserDefinedNetworkController) getPortInfoForUserDefinedNetwork(pod *corev1.Pod) map[string]*lpInfo { if util.PodWantsHostNetwork(pod) { return nil } @@ -44,35 +44,35 @@ func (bsnc *BaseSecondaryNetworkController) getPortInfoForSecondaryNetwork(pod * return portInfoMap } -// GetInternalCacheEntryForSecondaryNetwork returns the internal cache entry for this object, given an object and its type. +// GetInternalCacheEntryForUserDefinedNetwork returns the internal cache entry for this object, given an object and its type. // This is now used only for pods, which will get their the logical port cache entry. 
-func (bsnc *BaseSecondaryNetworkController) GetInternalCacheEntryForSecondaryNetwork(objType reflect.Type, obj interface{}) interface{} { +func (bsnc *BaseUserDefinedNetworkController) GetInternalCacheEntryForUserDefinedNetwork(objType reflect.Type, obj interface{}) interface{} { switch objType { case factory.PodType: pod := obj.(*corev1.Pod) - return bsnc.getPortInfoForSecondaryNetwork(pod) + return bsnc.getPortInfoForUserDefinedNetwork(pod) default: return nil } } -// AddSecondaryNetworkResourceCommon adds the specified object to the cluster according to its type and returns the error, -// if any, yielded during object creation. This function is called for secondary network only. -func (bsnc *BaseSecondaryNetworkController) AddSecondaryNetworkResourceCommon(objType reflect.Type, obj interface{}) error { +// AddUserDefinedNetworkResourceCommon adds the specified object to the cluster according to its type and returns the error, +// if any, yielded during object creation. This function is called for User Defined Networks only. 
+func (bsnc *BaseUserDefinedNetworkController) AddUserDefinedNetworkResourceCommon(objType reflect.Type, obj interface{}) error { switch objType { case factory.PodType: pod, ok := obj.(*corev1.Pod) if !ok { return fmt.Errorf("could not cast %T object to *knet.Pod", obj) } - return bsnc.ensurePodForSecondaryNetwork(pod, true) + return bsnc.ensurePodForUserDefinedNetwork(pod, true) case factory.NamespaceType: ns, ok := obj.(*corev1.Namespace) if !ok { return fmt.Errorf("could not cast %T object to *kapi.Namespace", obj) } - return bsnc.AddNamespaceForSecondaryNetwork(ns) + return bsnc.AddNamespaceForUserDefinedNetwork(ns) case factory.MultiNetworkPolicyType: mp, ok := obj.(*mnpapi.MultiNetworkPolicy) @@ -102,22 +102,22 @@ func (bsnc *BaseSecondaryNetworkController) AddSecondaryNetworkResourceCommon(ob return nil } -// UpdateSecondaryNetworkResourceCommon updates the specified object in the cluster to its version in newObj +// UpdateUserDefinedNetworkResourceCommon updates the specified object in the cluster to its version in newObj // according to its type and returns the error, if any, yielded during the object update. This function is -// called for secondary network only. +// called for User Defined Networks only. // Given an old and a new object; The inRetryCache boolean argument is to indicate if the given resource // is in the retryCache or not. 
-func (bsnc *BaseSecondaryNetworkController) UpdateSecondaryNetworkResourceCommon(objType reflect.Type, oldObj, newObj interface{}, inRetryCache bool) error { +func (bsnc *BaseUserDefinedNetworkController) UpdateUserDefinedNetworkResourceCommon(objType reflect.Type, oldObj, newObj interface{}, inRetryCache bool) error { switch objType { case factory.PodType: oldPod := oldObj.(*corev1.Pod) newPod := newObj.(*corev1.Pod) - return bsnc.ensurePodForSecondaryNetwork(newPod, shouldAddPort(oldPod, newPod, inRetryCache)) + return bsnc.ensurePodForUserDefinedNetwork(newPod, shouldAddPort(oldPod, newPod, inRetryCache)) case factory.NamespaceType: oldNs, newNs := oldObj.(*corev1.Namespace), newObj.(*corev1.Namespace) - return bsnc.updateNamespaceForSecondaryNetwork(oldNs, newNs) + return bsnc.updateNamespaceForUserDefinedNetwork(oldNs, newNs) case factory.MultiNetworkPolicyType: oldMp, ok := oldObj.(*mnpapi.MultiNetworkPolicy) @@ -164,11 +164,11 @@ func (bsnc *BaseSecondaryNetworkController) UpdateSecondaryNetworkResourceCommon return nil } -// DeleteResource deletes the object from the cluster according to the delete logic of its resource type. +// DeleteUserDefinedNetworkResourceCommon deletes the object from the cluster according to the delete logic of its resource type. // Given an object and optionally a cachedObj; cachedObj is the internal cache entry for this object, // used for now for pods. -// This function is called for secondary network only. -func (bsnc *BaseSecondaryNetworkController) DeleteSecondaryNetworkResourceCommon(objType reflect.Type, obj, cachedObj interface{}) error { +// This function is called for User Defined Networks only. 
+func (bsnc *BaseUserDefinedNetworkController) DeleteUserDefinedNetworkResourceCommon(objType reflect.Type, obj, cachedObj interface{}) error { switch objType { case factory.PodType: var portInfoMap map[string]*lpInfo @@ -177,11 +177,11 @@ func (bsnc *BaseSecondaryNetworkController) DeleteSecondaryNetworkResourceCommon if cachedObj != nil { portInfoMap = cachedObj.(map[string]*lpInfo) } - return bsnc.removePodForSecondaryNetwork(pod, portInfoMap) + return bsnc.removePodForUserDefinedNetwork(pod, portInfoMap) case factory.NamespaceType: ns := obj.(*corev1.Namespace) - return bsnc.deleteNamespace4SecondaryNetwork(ns) + return bsnc.deleteNamespaceForUserDefinedNetwork(ns) case factory.MultiNetworkPolicyType: mp, ok := obj.(*mnpapi.MultiNetworkPolicy) @@ -224,9 +224,9 @@ func (bsnc *BaseSecondaryNetworkController) DeleteSecondaryNetworkResourceCommon return nil } -// ensurePodForSecondaryNetwork tries to set up secondary network for a pod. It returns nil on success and error +// ensurePodForUserDefinedNetwork tries to set up the User Defined Network for a pod. It returns nil on success and error // on failure; failure indicates the pod set up should be retried later. 
-func (bsnc *BaseSecondaryNetworkController) ensurePodForSecondaryNetwork(pod *corev1.Pod, addPort bool) error { +func (bsnc *BaseUserDefinedNetworkController) ensurePodForUserDefinedNetwork(pod *corev1.Pod, addPort bool) error { // Try unscheduled pods later if !util.PodScheduled(pod) { @@ -301,7 +301,7 @@ func (bsnc *BaseSecondaryNetworkController) ensurePodForSecondaryNetwork(pod *co return nil } -func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *corev1.Pod, nadName, switchName string, +func (bsnc *BaseUserDefinedNetworkController) addLogicalPortToNetworkForNAD(pod *corev1.Pod, nadName, switchName string, network *nadapi.NetworkSelectionElement, kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus) error { var libovsdbExecuteTime time.Duration @@ -381,7 +381,7 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *c if lsp != nil { portUUID = lsp.UUID } - addOps, err := bsnc.addPodToNamespaceForSecondaryNetwork(pod.Namespace, podAnnotation.IPs, portUUID) + addOps, err := bsnc.addPodToNamespaceForUserDefinedNetwork(pod.Namespace, podAnnotation.IPs, portUUID) if err != nil { return err } @@ -421,9 +421,9 @@ func (bsnc *BaseSecondaryNetworkController) addLogicalPortToNetworkForNAD(pod *c return nil } -// removePodForSecondaryNetwork tried to tear down a pod. It returns nil on success and error on failure; +// removePodForUserDefinedNetwork tries to tear down a pod. It returns nil on success and error on failure; // failure indicates the pod tear down should be retried later.
-func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *corev1.Pod, portInfoMap map[string]*lpInfo) error { +func (bsnc *BaseUserDefinedNetworkController) removePodForUserDefinedNetwork(pod *corev1.Pod, portInfoMap map[string]*lpInfo) error { if util.PodWantsHostNetwork(pod) || !util.PodScheduled(pod) { return nil } @@ -526,7 +526,7 @@ func (bsnc *BaseSecondaryNetworkController) removePodForSecondaryNetwork(pod *co // hasIPAMClaim determines whether a pod's IPAM is being handled by IPAMClaim CR. // pod passed should already be validated as having a network connection to nadName -func (bsnc *BaseSecondaryNetworkController) hasIPAMClaim(pod *corev1.Pod, nadNamespacedName string) (bool, error) { +func (bsnc *BaseUserDefinedNetworkController) hasIPAMClaim(pod *corev1.Pod, nadNamespacedName string) (bool, error) { if !bsnc.AllowsPersistentIPs() { return false, nil } @@ -577,7 +577,7 @@ func (bsnc *BaseSecondaryNetworkController) hasIPAMClaim(pod *corev1.Pod, nadNam return hasIPAMClaim, nil } -func (bsnc *BaseSecondaryNetworkController) syncPodsForSecondaryNetwork(pods []interface{}) error { +func (bsnc *BaseUserDefinedNetworkController) syncPodsForUserDefinedNetwork(pods []interface{}) error { annotatedLocalPods := map[*corev1.Pod]map[string]*util.PodAnnotation{} // get the list of logical switch ports (equivalent to pods). Reserve all existing Pod IPs to // avoid subsequent new Pods getting the same duplicate Pod IP. @@ -655,10 +655,10 @@ func (bsnc *BaseSecondaryNetworkController) syncPodsForSecondaryNetwork(pods []i return bsnc.deleteStaleLogicalSwitchPorts(expectedLogicalPorts) } -// addPodToNamespaceForSecondaryNetwork returns the ops needed to add pod's IP to the namespace's address set. 
-func (bsnc *BaseSecondaryNetworkController) addPodToNamespaceForSecondaryNetwork(ns string, ips []*net.IPNet, portUUID string) ([]ovsdb.Operation, error) { +// addPodToNamespaceForUserDefinedNetwork returns the ops needed to add pod's IP to the namespace's address set. +func (bsnc *BaseUserDefinedNetworkController) addPodToNamespaceForUserDefinedNetwork(ns string, ips []*net.IPNet, portUUID string) ([]ovsdb.Operation, error) { var err error - nsInfo, nsUnlock, err := bsnc.ensureNamespaceLockedForSecondaryNetwork(ns, true, nil) + nsInfo, nsUnlock, err := bsnc.ensureNamespaceLockedForUserDefinedNetwork(ns, true, nil) if err != nil { return nil, fmt.Errorf("failed to ensure namespace locked: %v", err) } @@ -668,8 +668,8 @@ func (bsnc *BaseSecondaryNetworkController) addPodToNamespaceForSecondaryNetwork return bsnc.addLocalPodToNamespaceLocked(nsInfo, ips, portUUID) } -// AddNamespaceForSecondaryNetwork creates corresponding addressset in ovn db for secondary network -func (bsnc *BaseSecondaryNetworkController) AddNamespaceForSecondaryNetwork(ns *corev1.Namespace) error { +// AddNamespaceForUserDefinedNetwork creates corresponding address set in ovn db for User Defined Network +func (bsnc *BaseUserDefinedNetworkController) AddNamespaceForUserDefinedNetwork(ns *corev1.Namespace) error { klog.Infof("[%s] adding namespace for network %s", ns.Name, bsnc.GetNetworkName()) // Keep track of how long syncs take.
start := time.Now() @@ -677,7 +677,7 @@ func (bsnc *BaseSecondaryNetworkController) AddNamespaceForSecondaryNetwork(ns * klog.Infof("[%s] adding namespace took %v for network %s", ns.Name, time.Since(start), bsnc.GetNetworkName()) }() - _, nsUnlock, err := bsnc.ensureNamespaceLockedForSecondaryNetwork(ns.Name, false, ns) + _, nsUnlock, err := bsnc.ensureNamespaceLockedForUserDefinedNetwork(ns.Name, false, ns) if err != nil { return fmt.Errorf("failed to ensure namespace locked: %v", err) } @@ -693,14 +693,14 @@ func (bsnc *BaseSecondaryNetworkController) AddNamespaceForSecondaryNetwork(ns * return nil } -// ensureNamespaceLockedForSecondaryNetwork locks namespacesMutex, gets/creates an entry for ns, configures OVN nsInfo, +// ensureNamespaceLockedForUserDefinedNetwork locks namespacesMutex, gets/creates an entry for ns, configures OVN nsInfo, // and returns it with its mutex locked. // ns is the name of the namespace, while namespace is the optional k8s namespace object -func (bsnc *BaseSecondaryNetworkController) ensureNamespaceLockedForSecondaryNetwork(ns string, readOnly bool, namespace *corev1.Namespace) (*namespaceInfo, func(), error) { +func (bsnc *BaseUserDefinedNetworkController) ensureNamespaceLockedForUserDefinedNetwork(ns string, readOnly bool, namespace *corev1.Namespace) (*namespaceInfo, func(), error) { return bsnc.ensureNamespaceLockedCommon(ns, readOnly, namespace, bsnc.getAllNamespacePodAddresses, bsnc.configureNamespaceCommon) } -func (bsnc *BaseSecondaryNetworkController) updateNamespaceForSecondaryNetwork(old, newer *corev1.Namespace) error { +func (bsnc *BaseUserDefinedNetworkController) updateNamespaceForUserDefinedNetwork(old, newer *corev1.Namespace) error { var errors []error klog.Infof("[%s] updating namespace for network %s", old.Name, bsnc.GetNetworkName()) @@ -726,7 +726,7 @@ func (bsnc *BaseSecondaryNetworkController) updateNamespaceForSecondaryNetwork(o return utilerrors.Join(errors...) 
} -func (bsnc *BaseSecondaryNetworkController) deleteNamespace4SecondaryNetwork(ns *corev1.Namespace) error { +func (bsnc *BaseUserDefinedNetworkController) deleteNamespaceForUserDefinedNetwork(ns *corev1.Namespace) error { klog.Infof("[%s] deleting namespace for network %s", ns.Name, bsnc.GetNetworkName()) nsInfo, err := bsnc.deleteNamespaceLocked(ns.Name) @@ -746,7 +746,7 @@ func (bsnc *BaseSecondaryNetworkController) deleteNamespace4SecondaryNetwork(ns // WatchNetworkPolicy starts the watching of networkpolicy resource and calls // back the appropriate handler logic -func (bsnc *BaseSecondaryNetworkController) WatchNetworkPolicy() error { +func (bsnc *BaseUserDefinedNetworkController) WatchNetworkPolicy() error { if bsnc.netPolicyHandler != nil { return nil } @@ -760,7 +760,7 @@ func (bsnc *BaseSecondaryNetworkController) WatchNetworkPolicy() error { // WatchMultiNetworkPolicy starts the watching of multinetworkpolicy resource and calls // back the appropriate handler logic -func (bsnc *BaseSecondaryNetworkController) WatchMultiNetworkPolicy() error { +func (bsnc *BaseUserDefinedNetworkController) WatchMultiNetworkPolicy() error { if bsnc.multiNetPolicyHandler != nil { return nil } @@ -795,7 +795,7 @@ func cleanupPolicyLogicalEntities(nbClient libovsdbclient.Client, ops []ovsdb.Op // WatchIPAMClaims starts the watching of IPAMClaim resources and calls // back the appropriate handler logic -func (bsnc *BaseSecondaryNetworkController) WatchIPAMClaims() error { +func (bsnc *BaseUserDefinedNetworkController) WatchIPAMClaims() error { if bsnc.ipamClaimsHandler != nil { return nil } @@ -806,7 +806,7 @@ func (bsnc *BaseSecondaryNetworkController) WatchIPAMClaims() error { return err } -func (oc *BaseSecondaryNetworkController) allowPersistentIPs() bool { +func (oc *BaseUserDefinedNetworkController) allowPersistentIPs() bool { return config.OVNKubernetesFeature.EnablePersistentIPs && util.DoesNetworkRequireIPAM(oc.GetNetInfo()) && 
util.AllowsPersistentIPs(oc.GetNetInfo()) @@ -814,7 +814,7 @@ func (oc *BaseSecondaryNetworkController) allowPersistentIPs() bool { // buildUDNEgressSNAT is used to build the conditional SNAT required on L3 and L2 UDNs to // steer traffic correctly via mp0 when leaving OVN to the host -func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, outputPort string, isUDNAdvertised bool) ([]*nbdb.NAT, error) { +func (bsnc *BaseUserDefinedNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, outputPort string, isUDNAdvertised bool) ([]*nbdb.NAT, error) { if len(localPodSubnets) == 0 { return nil, nil // nothing to do } @@ -920,7 +920,7 @@ func getClusterNodesDestinationBasedSNATMatch(ipFamily utilnet.IPFamily, address } } -func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnotation *util.PodAnnotation, lsp *nbdb.LogicalSwitchPort) error { +func (bsnc *BaseUserDefinedNetworkController) ensureDHCP(pod *corev1.Pod, podAnnotation *util.PodAnnotation, lsp *nbdb.LogicalSwitchPort) error { opts := []kubevirt.DHCPConfigsOpt{} ipv4DNSServer, ipv6DNSServer, err := kubevirt.RetrieveDNSServiceClusterIPs(bsnc.watchFactory) @@ -942,7 +942,7 @@ func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnot return kubevirt.EnsureDHCPOptionsForLSP(bsnc.controllerName, bsnc.nbClient, pod, podAnnotation.IPs, lsp, opts...) 
} -func (bsnc *BaseSecondaryNetworkController) requireDHCP(pod *corev1.Pod) bool { +func (bsnc *BaseUserDefinedNetworkController) requireDHCP(pod *corev1.Pod) bool { // Configure DHCP only for kubevirt VMs layer2 primary udn with subnets return kubevirt.IsPodOwnedByVirtualMachine(pod) && util.IsNetworkSegmentationSupportEnabled() && @@ -950,7 +950,7 @@ func (bsnc *BaseSecondaryNetworkController) requireDHCP(pod *corev1.Pod) bool { bsnc.TopologyType() == types.Layer2Topology } -func (bsnc *BaseSecondaryNetworkController) setPodLogicalSwitchPortAddressesAndEnabledField( +func (bsnc *BaseUserDefinedNetworkController) setPodLogicalSwitchPortAddressesAndEnabledField( pod *corev1.Pod, nadName string, mac string, ips []string, enabled bool, ops []ovsdb.Operation) ([]ovsdb.Operation, *nbdb.LogicalSwitchPort, error) { lsp := &nbdb.LogicalSwitchPort{Name: bsnc.GetLogicalPortName(pod, nadName)} lsp.Enabled = ptr.To(enabled) @@ -985,7 +985,7 @@ func (bsnc *BaseSecondaryNetworkController) setPodLogicalSwitchPortAddressesAndE return ops, lsp, nil } -func (bsnc *BaseSecondaryNetworkController) disableLiveMigrationSourceLSPOps( +func (bsnc *BaseUserDefinedNetworkController) disableLiveMigrationSourceLSPOps( kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus, nadName string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { // closing the sourcePod lsp to ensure traffic goes to the now ready targetPod. 
@@ -993,7 +993,7 @@ func (bsnc *BaseSecondaryNetworkController) disableLiveMigrationSourceLSPOps( return ops, err } -func (bsnc *BaseSecondaryNetworkController) enableSourceLSPFailedLiveMigration(pod *corev1.Pod, nadName string, mac string, ips []string) error { +func (bsnc *BaseUserDefinedNetworkController) enableSourceLSPFailedLiveMigration(pod *corev1.Pod, nadName string, mac string, ips []string) error { kubevirtLiveMigrationStatus, err := kubevirt.DiscoverLiveMigrationStatus(bsnc.watchFactory, pod) if err != nil { return fmt.Errorf("failed to discover Live-migration status after pod termination: %w", err) diff --git a/go-controller/pkg/ovn/base_network_controller_secondary_test.go b/go-controller/pkg/ovn/base_network_controller_user_defined_test.go similarity index 95% rename from go-controller/pkg/ovn/base_network_controller_secondary_test.go rename to go-controller/pkg/ovn/base_network_controller_user_defined_test.go index 94e6347c28..7c38669c14 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary_test.go +++ b/go-controller/pkg/ovn/base_network_controller_user_defined_test.go @@ -20,7 +20,7 @@ import ( . 
"github.com/onsi/gomega" ) -var _ = Describe("BaseSecondaryNetworkController", func() { +var _ = Describe("BaseUserDefinedNetworkController", func() { var ( nad = ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, "100.128.0.0/16", types.NetworkRolePrimary) @@ -79,8 +79,8 @@ var _ = Describe("BaseSecondaryNetworkController", func() { ) defer fakeOVN.shutdown() - Expect(fakeOVN.NewSecondaryNetworkController(layer2NAD)).To(Succeed()) - controller, ok := fakeOVN.secondaryControllers["bluenet"] + Expect(fakeOVN.NewUserDefinedNetworkController(layer2NAD)).To(Succeed()) + controller, ok := fakeOVN.userDefinedNetworkControllers["bluenet"] Expect(ok).To(BeTrue()) pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -221,8 +221,8 @@ var _ = Describe("BaseSecondaryNetworkController", func() { }, }, ) - Expect(fakeOVN.NewSecondaryNetworkController(nad)).To(Succeed()) - controller, ok := fakeOVN.secondaryControllers["bluenet"] + Expect(fakeOVN.NewUserDefinedNetworkController(nad)).To(Succeed()) + controller, ok := fakeOVN.userDefinedNetworkControllers["bluenet"] Expect(ok).To(BeTrue()) // inject a real networkManager instead of a fake one, so getActiveNetworkForNamespace will get called nadController, err := networkmanager.NewForZone("dummyZone", nil, fakeOVN.watcher) @@ -240,7 +240,7 @@ var _ = Describe("BaseSecondaryNetworkController", func() { var initialPodList []interface{} initialPodList = append(initialPodList, podWithNoNamespace) - err = controller.bnc.syncPodsForSecondaryNetwork(initialPodList) + err = controller.bnc.syncPodsForUserDefinedNetwork(initialPodList) Expect(err).NotTo(HaveOccurred()) }) diff --git a/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go b/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go index 83676df950..421314586b 100644 --- a/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/base_secondary_layer2_network_controller.go @@ -18,14 +18,14 @@ 
import ( // method/structure shared by all layer 2 network controller, including localnet and layer2 network controllres. -// BaseSecondaryLayer2NetworkController structure holds per-network fields and network specific +// BaseLayer2UserDefinedNetworkController structure holds per-network fields and network specific // configuration for secondary layer2/localnet network controller -type BaseSecondaryLayer2NetworkController struct { - BaseSecondaryNetworkController +type BaseLayer2UserDefinedNetworkController struct { + BaseUserDefinedNetworkController } // stop gracefully stops the controller, and delete all logical entities for this network if requested -func (oc *BaseSecondaryLayer2NetworkController) stop() { +func (oc *BaseLayer2UserDefinedNetworkController) stop() { klog.Infof("Stop secondary %s network controller of network %s", oc.TopologyType(), oc.GetNetworkName()) close(oc.stopChan) oc.cancelableCtx.Cancel() @@ -56,7 +56,7 @@ func (oc *BaseSecondaryLayer2NetworkController) stop() { // cleanup cleans up logical entities for the given network, called from net-attach-def routine // could be called from a dummy Controller (only has CommonNetworkControllerInfo set) -func (oc *BaseSecondaryLayer2NetworkController) cleanup() error { +func (oc *BaseLayer2UserDefinedNetworkController) cleanup() error { netName := oc.GetNetworkName() klog.Infof("Delete OVN logical entities for network %s", netName) // delete layer 2 logical switches @@ -97,7 +97,7 @@ func (oc *BaseSecondaryLayer2NetworkController) cleanup() error { return nil } -func (oc *BaseSecondaryLayer2NetworkController) run() error { +func (oc *BaseLayer2UserDefinedNetworkController) run() error { // WatchNamespaces() should be started first because it has no other // dependencies, and WatchNodes() depends on it if err := oc.WatchNamespaces(); err != nil { @@ -161,7 +161,7 @@ func (oc *BaseSecondaryLayer2NetworkController) run() error { return nil } -func (oc *BaseSecondaryLayer2NetworkController) 
initializeLogicalSwitch(switchName string, clusterSubnets []config.CIDRNetworkEntry, excludeSubnets, reservedSubnets []*net.IPNet, clusterLoadBalancerGroupUUID, switchLoadBalancerGroupUUID string) (*nbdb.LogicalSwitch, error) { +func (oc *BaseLayer2UserDefinedNetworkController) initializeLogicalSwitch(switchName string, clusterSubnets []config.CIDRNetworkEntry, excludeSubnets, reservedSubnets []*net.IPNet, clusterLoadBalancerGroupUUID, switchLoadBalancerGroupUUID string) (*nbdb.LogicalSwitch, error) { logicalSwitch := nbdb.LogicalSwitch{ Name: switchName, ExternalIDs: util.GenerateExternalIDsForSwitchOrRouter(oc.GetNetInfo()), @@ -201,14 +201,14 @@ func (oc *BaseSecondaryLayer2NetworkController) initializeLogicalSwitch(switchNa return &logicalSwitch, nil } -func (oc *BaseSecondaryLayer2NetworkController) addUpdateNodeEvent(node *corev1.Node) error { +func (oc *BaseLayer2UserDefinedNetworkController) addUpdateNodeEvent(node *corev1.Node) error { if oc.isLocalZoneNode(node) { return oc.addUpdateLocalNodeEvent(node) } return oc.addUpdateRemoteNodeEvent(node) } -func (oc *BaseSecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1.Node) error { +func (oc *BaseLayer2UserDefinedNetworkController) addUpdateLocalNodeEvent(node *corev1.Node) error { _, present := oc.localZoneNodes.LoadOrStore(node.Name, true) if !present { @@ -223,7 +223,7 @@ func (oc *BaseSecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *co return nil } -func (oc *BaseSecondaryLayer2NetworkController) addUpdateRemoteNodeEvent(node *corev1.Node) error { +func (oc *BaseLayer2UserDefinedNetworkController) addUpdateRemoteNodeEvent(node *corev1.Node) error { _, present := oc.localZoneNodes.Load(node.Name) if present { @@ -243,12 +243,12 @@ func (oc *BaseSecondaryLayer2NetworkController) addUpdateRemoteNodeEvent(node *c return nil } -func (oc *BaseSecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) error { +func (oc *BaseLayer2UserDefinedNetworkController) 
deleteNodeEvent(node *corev1.Node) error { oc.localZoneNodes.Delete(node.Name) return nil } -func (oc *BaseSecondaryLayer2NetworkController) syncNodes(nodes []interface{}) error { +func (oc *BaseLayer2UserDefinedNetworkController) syncNodes(nodes []interface{}) error { for _, tmp := range nodes { node, ok := tmp.(*corev1.Node) if !ok { @@ -264,7 +264,7 @@ func (oc *BaseSecondaryLayer2NetworkController) syncNodes(nodes []interface{}) e return nil } -func (oc *BaseSecondaryLayer2NetworkController) syncIPAMClaims(ipamClaims []interface{}) error { +func (oc *BaseLayer2UserDefinedNetworkController) syncIPAMClaims(ipamClaims []interface{}) error { switchName, err := oc.getExpectedSwitchName(dummyPod()) if err != nil { return err diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go index 080dd22d19..ed0ad36356 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go @@ -43,7 +43,6 @@ const ( // taken from k8s controller guidelines type Controller struct { // name of the controller that starts the ANP controller - // (values are default-network-controller, secondary-network-controller etc..) 
controllerName string sync.RWMutex anpClientSet anpclientset.Interface diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos.go b/go-controller/pkg/ovn/controller/network_qos/network_qos.go index 6f8f247541..04b9ceef9d 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos.go @@ -337,7 +337,7 @@ func (c *Controller) networkManagedByMe(networkSelectors crdtypes.NetworkSelecto return false, err } case crdtypes.SecondaryUserDefinedNetworks: - if !c.IsSecondary() { + if !c.IsUserDefinedNetwork() { return false, nil } if networkSelector.SecondaryUserDefinedNetworkSelector == nil { @@ -395,7 +395,7 @@ func (c *Controller) getLogicalSwitchName(nodeName string) string { return c.GetNetworkScopedSwitchName(types.OVNLayer2Switch) case c.TopologyType() == types.LocalnetTopology: return c.GetNetworkScopedSwitchName(types.OVNLocalnetSwitch) - case !c.IsSecondary() || c.TopologyType() == types.Layer3Topology: + case !c.IsUserDefinedNetwork() || c.TopologyType() == types.Layer3Topology: return c.GetNetworkScopedSwitchName(nodeName) default: return "" diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go index febc4d1953..06d75719b5 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go @@ -45,7 +45,7 @@ func (c *Controller) addQoSToLogicalSwitch(qosState *networkQoSState, switchName Match: generateNetworkQoSMatch(qosState, rule, ipv4Enabled, ipv6Enabled), Priority: rule.Priority, } - if c.IsSecondary() { + if c.IsUserDefinedNetwork() { qos.ExternalIDs[types.NetworkExternalID] = c.GetNetworkName() } if rule.Dscp >= 0 { diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index 4d771825ed..4b79f1198b 100644 --- 
a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -44,7 +44,7 @@ func (n *primaryNetInfoWrapper) IsPrimaryNetwork() bool { return true } -func (n *primaryNetInfoWrapper) IsSecondary() bool { +func (n *primaryNetInfoWrapper) IsUserDefinedNetwork() bool { return false } @@ -64,7 +64,7 @@ func (n *secondaryNetInfoWrapper) IsPrimaryNetwork() bool { return false } -func (n *secondaryNetInfoWrapper) IsSecondary() bool { +func (n *secondaryNetInfoWrapper) IsUserDefinedNetwork() bool { return true } @@ -1011,7 +1011,7 @@ var _ = Describe("NetworkQoS Controller", func() { err = libovsdbops.CreateOrUpdateLogicalSwitch(nbClient, secondarySwitch) Expect(err).NotTo(HaveOccurred()) - // Wrap the NetInfo with our custom implementation that returns true for IsSecondary() + // Wrap the NetInfo with our custom implementation that returns true for IsUserDefinedNetwork() secNetWrapper := &secondaryNetInfoWrapper{NetInfo: secondaryNadInfo} initNetworkQoSController(secNetWrapper, addressset.NewFakeAddressSetFactory("secondary-controller"), "secondary-controller", enableInterconnect) diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index dae1aaf71e..27f1dda688 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -312,10 +312,15 @@ func (oc *DefaultNetworkController) syncDb() error { } // NAT syncer must only be run once. It performs OVN NAT updates. 
- nadSyncer := nat.NewNATSyncer(oc.nbClient, oc.controllerName) - if err = nadSyncer.Sync(); err != nil { + natSyncer := nat.NewNATSyncer(oc.nbClient, oc.controllerName) + if err = natSyncer.Sync(); err != nil { return fmt.Errorf("failed to sync NATs: %v", err) } + + // Find ACLs with legacy DBIDs and update them from secondary -> user-defined + if err := oc.syncUDNIsolation(); err != nil { + return err + } return nil } diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index 881195ffc7..31a48d0c99 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -8,6 +8,7 @@ import ( "net" "reflect" "slices" + "sort" "strings" "sync" "time" @@ -808,7 +809,7 @@ func (e *EgressIPController) addPodEgressIPAssignments(ni util.NetInfo, name str } var remainingAssignments []egressipv1.EgressIPStatusItem nadName := ni.GetNetworkName() - if ni.IsSecondary() { + if ni.IsUserDefinedNetwork() { nadNames := ni.GetNADs() if len(nadNames) == 0 { return fmt.Errorf("expected at least one NAD name for Namespace %s", pod.Namespace) @@ -868,7 +869,11 @@ func (e *EgressIPController) addPodEgressIPAssignments(ni util.NetInfo, name str } for _, status := range remainingAssignments { klog.V(2).Infof("Adding pod egress IP status: %v for EgressIP: %s and pod: %s/%s/%v", status, name, pod.Namespace, pod.Name, podIPNets) - err = e.nodeZoneState.DoWithLock(status.Node, func(_ string) error { + nodesToLock := []string{status.Node, pod.Spec.NodeName} + // Sort nodes lexicographically to ensure node locks are acquired in + // same order, preventing deadlock situations. 
+ sort.Strings(nodesToLock) + err = e.nodeZoneState.DoWithLock(nodesToLock[0], func(_ string) error { if status.Node == pod.Spec.NodeName { // we are safe, no need to grab lock again if err := e.addPodEgressIPAssignment(ni, name, status, mark, pod, podIPNets); err != nil { @@ -877,7 +882,7 @@ func (e *EgressIPController) addPodEgressIPAssignments(ni util.NetInfo, name str podState.egressStatuses.statusMap[status] = "" return nil } - return e.nodeZoneState.DoWithLock(pod.Spec.NodeName, func(_ string) error { + return e.nodeZoneState.DoWithLock(nodesToLock[1], func(_ string) error { // we need to grab lock again for pod's node if err := e.addPodEgressIPAssignment(ni, name, status, mark, pod, podIPNets); err != nil { return fmt.Errorf("unable to create egressip configuration for pod %s/%s/%v, err: %w", pod.Namespace, pod.Name, podIPNets, err) @@ -1041,7 +1046,11 @@ func (e *EgressIPController) deletePodEgressIPAssignments(ni util.NetInfo, name continue } klog.V(2).Infof("Deleting pod egress IP status: %v for EgressIP: %s and pod: %s/%s", statusToRemove, name, pod.Name, pod.Namespace) - err := e.nodeZoneState.DoWithLock(statusToRemove.Node, func(_ string) error { + nodesToLock := []string{statusToRemove.Node, pod.Spec.NodeName} + // Sort nodes lexicographically to ensure node locks are acquired in + // same order, preventing deadlock situations. 
+ sort.Strings(nodesToLock) + err := e.nodeZoneState.DoWithLock(nodesToLock[0], func(_ string) error { if statusToRemove.Node == pod.Spec.NodeName { // we are safe, no need to grab lock again if err := e.deletePodEgressIPAssignment(ni, name, statusToRemove, pod); err != nil { @@ -1050,7 +1059,7 @@ func (e *EgressIPController) deletePodEgressIPAssignments(ni util.NetInfo, name podStatus.egressStatuses.delete(statusToRemove) return nil } - return e.nodeZoneState.DoWithLock(pod.Spec.NodeName, func(_ string) error { + return e.nodeZoneState.DoWithLock(nodesToLock[1], func(_ string) error { if err := e.deletePodEgressIPAssignment(ni, name, statusToRemove, pod); err != nil { return err } @@ -1151,7 +1160,7 @@ type egressIPCache struct { egressNodeRedirectsCache nodeNetworkRedirects // network name -> OVN cluster router name networkToRouter map[string]string - // packet mark for primary secondary networks + // packet mark for primary UDNs // EgressIP name -> mark markCache map[string]string } @@ -1993,7 +2002,7 @@ func (e *EgressIPController) generateCacheForEgressIP() (egressIPCache, error) { egressRemotePods: map[string]sets.Set[string]{}, } nadName := types.DefaultNetworkName - if ni.IsSecondary() { + if ni.IsUserDefinedNetwork() { nadNames := ni.GetNADs() if len(nadNames) == 0 { klog.Errorf("Network %s: error build egress IP sync cache, expected at least one NAD name for Namespace %s", ni.GetNetworkName(), namespace.Name) @@ -2297,7 +2306,7 @@ func (e *EgressIPController) addStandByEgressIPAssignment(ni util.NetInfo, podKe continue } eipToAssign = eipName // use the first EIP we find successfully - if ni.IsSecondary() { + if ni.IsUserDefinedNetwork() { mark = getEgressIPPktMark(eip.Name, eip.Annotations) } break @@ -2308,7 +2317,7 @@ func (e *EgressIPController) addStandByEgressIPAssignment(ni util.NetInfo, podKe } // get IPs nadName := ni.GetNetworkName() - if ni.IsSecondary() { + if ni.IsUserDefinedNetwork() { nadNames := ni.GetNADs() if len(nadNames) == 0 { return 
fmt.Errorf("expected at least one NAD name for Namespace %s", pod.Namespace) @@ -2391,7 +2400,7 @@ func (e *EgressIPController) addPodEgressIPAssignment(ni util.NetInfo, egressIPN return fmt.Errorf("unable to create NAT rule ops for status: %v, err: %v", status, err) } - } else if ni.IsSecondary() && ni.TopologyType() == types.Layer3Topology { + } else if ni.IsUserDefinedNetwork() && ni.TopologyType() == types.Layer3Topology { // not required for L2 because we always have LRPs using reroute action to pkt mark ops, err = e.createGWMarkPolicyOps(ni, ops, podIPs, status, mark, pod.Namespace, pod.Name, egressIPName) if err != nil { @@ -2415,7 +2424,7 @@ func (e *EgressIPController) addPodEgressIPAssignment(ni util.NetInfo, egressIPN // For L2, we always attach an LRP with reroute action to the Nodes gateway router. If the pod is remote, use the local zone Node name to generate the GW router name. nodeName := pod.Spec.NodeName - if loadedEgressNode && loadedPodNode && !isLocalZonePod && isLocalZoneEgressNode && ni.IsSecondary() && ni.TopologyType() == types.Layer2Topology { + if loadedEgressNode && loadedPodNode && !isLocalZonePod && isLocalZoneEgressNode && ni.IsUserDefinedNetwork() && ni.TopologyType() == types.Layer2Topology { nodeName = status.Node } routerName, err := getTopologyScopedRouterName(ni, nodeName) @@ -2426,7 +2435,7 @@ func (e *EgressIPController) addPodEgressIPAssignment(ni util.NetInfo, egressIPN // exec when node is local OR when pods are local or L2 UDN // don't add a reroute policy if the egress node towards which we are adding this doesn't exist if loadedEgressNode && loadedPodNode { - if isLocalZonePod || (isLocalZoneEgressNode && ni.IsSecondary() && ni.TopologyType() == types.Layer2Topology) { + if isLocalZonePod || (isLocalZoneEgressNode && ni.IsUserDefinedNetwork() && ni.TopologyType() == types.Layer2Topology) { ops, err = e.createReroutePolicyOps(ni, ops, podIPs, status, mark, egressIPName, nextHopIP, routerName, pod.Namespace, pod.Name) if 
err != nil { return fmt.Errorf("unable to create logical router policy ops, err: %v", err) @@ -2483,7 +2492,7 @@ func (e *EgressIPController) deletePodEgressIPAssignment(ni util.NetInfo, egress } // For L2, we always attach an LRP with reroute action to the Nodes gateway router. If the pod is remote, use the local zone Node name to generate the GW router name. nodeName := pod.Spec.NodeName - if !isLocalZonePod && isLocalZoneEgressNode && ni.IsSecondary() && ni.TopologyType() == types.Layer2Topology { + if !isLocalZonePod && isLocalZoneEgressNode && ni.IsUserDefinedNetwork() && ni.TopologyType() == types.Layer2Topology { nodeName = status.Node } routerName, err := getTopologyScopedRouterName(ni, nodeName) @@ -2502,7 +2511,7 @@ func (e *EgressIPController) deletePodEgressIPAssignment(ni util.NetInfo, egress // Case 1 - node where pod is hosted is not known // Case 2 - pod is within the local zone // case 3 - a local zone node is egress node and pod is attached to layer 2. For layer2, there is always an LRP attached to the egress Node GW router - if !loadedPodNode || isLocalZonePod || (isLocalZoneEgressNode && ni.IsSecondary() && ni.TopologyType() == types.Layer2Topology) { + if !loadedPodNode || isLocalZonePod || (isLocalZoneEgressNode && ni.IsUserDefinedNetwork() && ni.TopologyType() == types.Layer2Topology) { ops, err = e.deleteReroutePolicyOps(ni, ops, status, egressIPName, nextHopIP, routerName, pod.Namespace, pod.Name) if errors.Is(err, libovsdbclient.ErrNotFound) { // if the gateway router join IP setup is already gone, then don't count it as error. 
@@ -2525,7 +2534,7 @@ func (e *EgressIPController) deletePodEgressIPAssignment(ni util.NetInfo, egress if err != nil { return fmt.Errorf("unable to delete NAT rule for status: %v, err: %v", status, err) } - } else if ni.IsSecondary() && ni.TopologyType() == types.Layer3Topology { + } else if ni.IsUserDefinedNetwork() && ni.TopologyType() == types.Layer3Topology { ops, err = e.deleteGWMarkPolicyOps(ni, ops, status, pod.Namespace, pod.Name, egressIPName) if err != nil { return fmt.Errorf("unable to create GW router packet mark LRPs delete ops for pod %s/%s: %v", pod.Namespace, pod.Name, err) @@ -2788,7 +2797,7 @@ func (e *EgressIPController) getNextHop(ni util.NetInfo, egressNodeName, egressI return gatewayRouterIP.String(), nil } else { // for an egress IP assigned to a host secondary interface, next hop IP is the networks management port IP - if ni.IsSecondary() { + if ni.IsUserDefinedNetwork() { return "", fmt.Errorf("egress IP assigned to a host secondary interface for a user defined network (network name %s) is unsupported", ni.GetNetworkName()) } return e.getLocalMgmtPortNextHop(ni, egressNodeName, egressIPName, egressIP, isEgressIPv6) @@ -2826,7 +2835,7 @@ func (e *EgressIPController) createReroutePolicyOps(ni util.NetInfo, ops []ovsdb isEgressIPv6 := utilnet.IsIPv6String(status.EgressIP) ipFamily := getEIPIPFamily(isEgressIPv6) options := make(map[string]string) - if ni.IsSecondary() { + if ni.IsUserDefinedNetwork() { if !mark.IsAvailable() { return nil, fmt.Errorf("egressIP %s object must contain a mark for user defined networks", egressIPName) } @@ -3025,7 +3034,7 @@ func (e *EgressIPController) deleteEgressIPStatusSetup(ni util.NetInfo, name str if err != nil { return fmt.Errorf("error removing egress ip %s nats on router %s: %v", name, routerName, err) } - } else if ni.IsSecondary() { + } else if ni.IsUserDefinedNetwork() { if ops, err = e.deleteGWMarkPolicyForStatusOps(ni, ops, status, name); err != nil { return fmt.Errorf("failed to delete gateway mark 
policy: %v", err) } @@ -3582,7 +3591,7 @@ func (e *EgressIPController) getPodIPs(ni util.NetInfo, pod *corev1.Pod, nadName return nil, fmt.Errorf("failed to get pod %s/%s IPs", pod.Namespace, pod.Name) } podIPs = getIPFromIPNetFn(podIPNets) - } else if ni.IsSecondary() { + } else if ni.IsUserDefinedNetwork() { podIPNets := util.GetPodCIDRsWithFullMaskOfNetwork(pod, nadName) if len(podIPNets) == 0 { return nil, fmt.Errorf("failed to get pod %s/%s IPs", pod.Namespace, pod.Name) diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index b54dc50a42..7455d9ca40 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "strings" + "sync" "time" "github.com/onsi/ginkgo/v2" @@ -11153,6 +11154,479 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" ), ) + ginkgo.It("should not deadlock when two EgressIP pods are hosted and egress through opposite nodes", func() { + // This test is designed to reproduce a deadlock issue. + // To reproduce, undo the corresponding fix introduced in this commit and + // add a hard sleep of 10 seconds immediately before the code at line 1053 + // in egressip.go + // (https://github.com/ovn-kubernetes/ovn-kubernetes/blob/fdb1f44bb04cc7f90ba06ca2782d494d5a8068fc/go-controller/pkg/ovn/egressip.go#L1053). + // Note: Multiple test runs may be required to consistently reproduce this issue.
+ app.Action = func(*cli.Context) error { + config.OVNKubernetesFeature.EnableInterconnect = true + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + zone := "global" + node1IPv4OVN := "192.168.126.202/24" + node1IPv4TranSwitchIP := "100.88.0.2/16" + node1IPv4Addresses := []string{node1IPv4OVN} + node2IPv4OVN := "192.168.126.203/24" + node2IPv4TranSwitchIP := "100.88.0.3/16" + node2IPv4Addresses := []string{node2IPv4OVN} + + egressPod := *newPodWithLabels(eipNamespace, podName, node1Name, podV4IP, egressPodLabel) + egressNamespace := newNamespace(eipNamespace) + + egressPod2 := *newPodWithLabels(eipNamespace2, podName2, node2Name, podV4IP2, egressPodLabel) + egressNamespace2 := newNamespace(eipNamespace2) + + nodes := getIPv4Nodes([]nodeInfo{{node1IPv4Addresses, zone, node1IPv4TranSwitchIP}, {node2IPv4Addresses, zone, node2IPv4TranSwitchIP}}) + gomega.Expect(nodes).To(gomega.HaveLen(2)) + node1 := nodes[0] + node1.Labels = map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + node2 := nodes[1] + node2.Labels = map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + + // Assign EIPs on two different nodes and also ensure EIP is not on the node where its EIP pod is placed. 
+ eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace.Name, + }, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: node2.Name, + EgressIP: egressIP1, + }, + }, + }, + } + + eIP2 := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIP2Name), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace2.Name, + }, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{ + { + Node: node1.Name, + EgressIP: egressIP2, + }, + }, + }, + } + node1Switch := &nbdb.LogicalSwitch{ + UUID: node1.Name + "-UUID", + Name: node1.Name, + } + node2Switch := &nbdb.LogicalSwitch{ + UUID: node2.Name + "-UUID", + Name: node2.Name, + } + dynamicNeighRouters := "true" + if config.OVNKubernetesFeature.EnableInterconnect { + dynamicNeighRouters = "false" + } + + logicalRouterOptions := map[string]string{ + "dynamic_neigh_routers": dynamicNeighRouters, + } + + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name, + Networks: []string{node2LogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouter{ + Name: 
types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1.Name, + UUID: types.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID"}, + Options: logicalRouterOptions, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node2.Name, + UUID: types.GWRouterPrefix + node2.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID"}, + Options: logicalRouterOptions, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node1Name + "-UUID", + Name: types.ExternalSwitchPrefix + node1Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node2Name + "-UUID", + Name: types.ExternalSwitchPrefix + node2Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, + }, + node1Switch, + node2Switch, + }, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP, eIP2}, + }, + &corev1.NodeList{ + Items: []corev1.Node{node1, node2}, + }, + 
&corev1.NamespaceList{ + Items: []corev1.Namespace{*egressNamespace, *egressNamespace2}, + }) + + err := fakeOvn.controller.WatchEgressIPNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIPPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressNodes() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchEgressIP() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + // Run the test for 10 times. + for range 10 { + // Create both pods in parallel to trigger pod add events for both EIPs and check it's handled gracefully. + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer ginkgo.GinkgoRecover() + defer wg.Done() + i, n, _ := net.ParseCIDR(podV4IP + "/23") + n.IP = i + fakeOvn.controller.logicalPortCache.add(&egressPod, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) + _, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Create(context.TODO(), &egressPod, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + go func() { + defer ginkgo.GinkgoRecover() + defer wg.Done() + i, n, _ := net.ParseCIDR(podV4IP2 + "/23") + n.IP = i + fakeOvn.controller.logicalPortCache.add(&egressPod2, "", types.DefaultNetworkName, "", nil, []*net.IPNet{n}) + _, err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod2.Namespace).Create(context.TODO(), &egressPod2, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + wg.Wait() + + node1Switch.QOSRules = []string{"default-QoS-UUID"} + expectedNode2NatLogicalPort := "k8s-node2" + expectedNode1NatLogicalPort := "k8s-node1" + + egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) + egressIPServedPodsASv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP, podV4IP2}, types.DefaultNetworkName, + fakeOvn.controller.eIPC.controllerName) + ipNets, _ := util.ParseIPNets(node1IPv4Addresses) + egressNodeIPs := []string{} + 
for _, ipNet := range ipNets { + egressNodeIPs = append(egressNodeIPs, ipNet.IP.String()) + } + ipNets, _ = util.ParseIPNets(node2IPv4Addresses) + for _, ipNet := range ipNets { + egressNodeIPs = append(egressNodeIPs, ipNet.IP.String()) + } + + egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets(egressNodeIPs) + node1Switch.QOSRules = []string{"default-QoS-UUID"} + node2Switch.QOSRules = []string{"default-QoS-UUID"} + expectedDatabaseState := []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: node2LogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP.Name, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: 
nodeLogicalRouterIPv4, + ExternalIDs: getEgressIPLRPReRouteDbIDs(eIP2.Name, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + UUID: "eip2-reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID", + LogicalIP: podV4IP, + ExternalIP: egressIP1, + ExternalIDs: getEgressIPNATDbIDs(egressIPName, egressPod.Namespace, egressPod.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNode2NatLogicalPort, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip2-nat-UUID", + LogicalIP: podV4IP2, + ExternalIP: egressIP2, + ExternalIDs: getEgressIPNATDbIDs(egressIP2Name, egressPod2.Namespace, egressPod2.Name, IPFamilyValueV4, fakeOvn.controller.controllerName).GetExternalIDs(), + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNode1NatLogicalPort, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1.Name, + UUID: types.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID"}, + Nat: []string{"egressip2-nat-UUID"}, + Options: logicalRouterOptions, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node2.Name, + UUID: types.GWRouterPrefix + node2.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + 
types.GWRouterPrefix + node2.Name + "-UUID"}, + Nat: []string{"egressip-nat-UUID"}, + Options: logicalRouterOptions, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"reroute-UUID", "eip2-reroute-UUID", "default-no-reroute-UUID", "no-reroute-service-UUID", + "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name, + Networks: []string{node2LogicalRouterIfAddrV4}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node1Name + "-UUID", + Name: types.ExternalSwitchPrefix + node1Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node2Name + "-UUID", + Name: 
types.ExternalSwitchPrefix + node2Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, + }, + node1Switch, + node2Switch, + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASv4, + egressNodeIPsASv4, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + + // Delete both pods in parallel to trigger pod delete events for both EIPs and check it's handled gracefully. + var wg2 sync.WaitGroup + wg2.Add(2) + go func() { + defer ginkgo.GinkgoRecover() + defer wg2.Done() + err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod.Namespace).Delete(context.TODO(), egressPod.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + go func() { + defer ginkgo.GinkgoRecover() + defer wg2.Done() + err := fakeOvn.fakeClient.KubeClient.CoreV1().Pods(egressPod2.Namespace).Delete(context.TODO(), egressPod2.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }() + wg2.Wait() + + egressIPServedPodsASv4.Addresses = nil + expectedDatabaseState = []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "default-no-reroute-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToPodDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + getNoReRouteReplyTrafficPolicy(types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + ExternalIDs: 
getEgressIPLRPNoReRoutePodToJoinDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "(ip4.src == $a8519615025667110816 || ip4.src == $a13607449821398607916) && ip4.dst == $a712973235162149816", + Action: nbdb.LogicalRouterPolicyActionAllow, + Options: map[string]string{"pkt_mark": types.EgressIPNodeConnectionMark}, + UUID: "no-reroute-node-UUID", + ExternalIDs: getEgressIPLRPNoReRoutePodToNodeDbIDs(IPFamilyValueV4, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName).GetExternalIDs(), + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node1.Name, + UUID: types.GWRouterPrefix + node1.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID"}, + Options: logicalRouterOptions, + }, + &nbdb.LogicalRouter{ + Name: types.GWRouterPrefix + node2.Name, + UUID: types.GWRouterPrefix + node2.Name + "-UUID", + Ports: []string{types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID"}, + Options: logicalRouterOptions, + }, + &nbdb.LogicalRouter{ + Name: types.OVNClusterRouter, + UUID: types.OVNClusterRouter + "-UUID", + Policies: []string{"default-no-reroute-UUID", "no-reroute-service-UUID", + "no-reroute-node-UUID", "default-no-reroute-reply-traffic"}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node1.Name, + Networks: []string{nodeLogicalRouterIfAddrV4}, + }, + &nbdb.LogicalRouterPort{ + UUID: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name + "-UUID", + Name: types.GWRouterToJoinSwitchPrefix + types.GWRouterPrefix + node2.Name, + Networks: []string{node2LogicalRouterIfAddrV4}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name 
+ "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node1Name + "-UUID", + Name: types.ExternalSwitchPrefix + node1Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: types.ExternalSwitchPrefix + node2Name + "-UUID", + Name: types.ExternalSwitchPrefix + node2Name, + Ports: []string{types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "-UUID"}, + }, + node1Switch, + node2Switch, + getDefaultQoSRule(false, types.DefaultNetworkName, fakeOvn.controller.eIPC.controllerName), + egressSVCServedPodsASv4, + egressIPServedPodsASv4, + egressNodeIPsASv4, + } + gomega.Eventually(fakeOvn.nbClient).WithTimeout(30 * time.Second).Should(libovsdbtest.HaveData(expectedDatabaseState)) + } + + return nil + } + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + ginkgo.It("should ensure SNATs towards egressIP and nodeIP are correctly configured during egressIP re-assignment", func() { app.Action = func(*cli.Context) error { config.Gateway.DisableSNATMultipleGWs = true diff --git a/go-controller/pkg/ovn/egressip_udn_l2_test.go b/go-controller/pkg/ovn/egressip_udn_l2_test.go index 577e9d7646..581992ab6b 100644 --- a/go-controller/pkg/ovn/egressip_udn_l2_test.go +++ 
b/go-controller/pkg/ovn/egressip_udn_l2_test.go @@ -61,7 +61,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol layer2SwitchName = "ovn_layer2_switch" gwIP = "192.168.126.1" gwIP2 = "192.168.127.1" - secondaryNetworkID = "2" + userDefinedNetworkID = "2" ) getEgressIPStatusLen := func(egressIPName string) func() int { @@ -156,7 +156,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -524,7 +524,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -662,14 +662,14 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.zone = node1.Name fakeOvn.eIPController.zone = node1.Name fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) fakeOvn.controller.eIPC.nodeZoneState.Store(node1Name, true) fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - // simulate Start() of secondary network controller + // 
simulate Start() of UDN controller err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo(), &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(secConInfo.bnc.GetNetInfo(), node1Name) @@ -1030,7 +1030,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1181,7 +1181,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") @@ -1512,7 +1512,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1672,7 +1672,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := 
fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") @@ -1876,7 +1876,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2229,7 +2229,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2383,7 +2383,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") @@ -2597,7 +2597,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2734,7 
+2734,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) fakeOvn.controller.zone = node1Name fakeOvn.eIPController.zone = node1Name - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) err = fakeOvn.eIPController.SyncLocalNodeZonesCache() gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go index 02b04f938d..28035e8374 100644 --- a/go-controller/pkg/ovn/egressip_udn_l3_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -41,26 +41,26 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol ) const ( - nadName1 = "nad1" - networkName1 = "network1" - networkName1_ = networkName1 + "_" - node1Name = "node1" - v4Net1 = "20.128.0.0/14" - v4Node1Net1 = "20.128.0.0/16" - v4Pod1IPNode1Net1 = "20.128.0.5" - podName3 = "egress-pod3" - v4Pod2IPNode1Net1 = "20.128.0.6" - v4Node1Tsp = "100.88.0.2" - node2Name = "node2" - v4Node2Net1 = "20.129.0.0/16" - v4Node2Tsp = "100.88.0.3" - podName4 = "egress-pod4" - v4Pod1IPNode2Net1 = "20.129.0.2" - v4Pod2IPNode2Net1 = "20.129.0.3" - eIP1Mark = 50000 - eIP2Mark = 50001 - secondaryNetworkID = "2" - //tnlKey = zoneinterconnect.BaseTransitSwitchTunnelKey + secondaryNetworkID + nadName1 = "nad1" + networkName1 = "network1" + networkName1_ = networkName1 + "_" + node1Name = "node1" + v4Net1 = "20.128.0.0/14" + v4Node1Net1 = "20.128.0.0/16" + v4Pod1IPNode1Net1 = "20.128.0.5" + podName3 = "egress-pod3" + v4Pod2IPNode1Net1 = "20.128.0.6" + v4Node1Tsp = "100.88.0.2" + node2Name = "node2" + v4Node2Net1 = "20.129.0.0/16" + v4Node2Tsp = "100.88.0.3" + podName4 = "egress-pod4" + v4Pod1IPNode2Net1 = "20.129.0.2" + v4Pod2IPNode2Net1 = 
"20.129.0.3" + eIP1Mark = 50000 + eIP2Mark = 50001 + userDefinedNetworkID = "2" + //tnlKey = zoneinterconnect.BaseTransitSwitchTunnelKey + userDefinedNetworkID tnlKey = "16711685" ) @@ -157,7 +157,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -534,7 +534,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -672,14 +672,14 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.zone = node1.Name fakeOvn.eIPController.zone = node1.Name fakeOvn.controller.logicalPortCache.add(&egressPodCDNLocal, "", ovntypes.DefaultNetworkName, "", nil, []*net.IPNet{nCDN}) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) fakeOvn.controller.eIPC.nodeZoneState.Store(node1Name, true) fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - // simulate Start() of secondary network controller + // simulate Start() of UDN controller err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo(), &node1) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(secConInfo.bnc.GetNetInfo(), node1Name) @@ -1055,7 +1055,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1204,7 +1204,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") @@ -1791,7 +1791,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1949,7 +1949,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) // Add pod IPs to UDN cache iUDN, nUDN, _ := 
net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") @@ -2161,7 +2161,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2522,7 +2522,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol netconf, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} netInfo, err := util.NewNetInfo(&netconf) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2674,7 +2674,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.controller.WatchEgressIP() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - secConInfo, ok := fakeOvn.secondaryControllers[networkName1] + secConInfo, ok := fakeOvn.userDefinedNetworkControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) secConInfo.bnc.zone = node1.Name gomega.Expect(secConInfo.bnc.WatchNodes()).To(gomega.Succeed()) diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go index 01808d3927..84586aa6b4 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go @@ -139,7 +139,7 @@ func (ps podsNetInfo) getPod(ip net.IP) (podNetInfo, error) { func (syncer *LRPSyncer) buildCDNPodCache() 
(podsNetInfo, podsNetInfo, error) { p := func(item *nbdb.LogicalSwitchPort) bool { - return item.ExternalIDs["pod"] == "true" && item.ExternalIDs[ovntypes.NADExternalID] == "" // ignore secondary network LSPs + return item.ExternalIDs["pod"] == "true" && item.ExternalIDs[ovntypes.NADExternalID] == "" // ignore UDN LSPs } lsps, err := libovsdbops.FindLogicalSwitchPortWithPredicate(syncer.nbClient, p) if err != nil { diff --git a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go index ba81787817..f93862cb3a 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go @@ -124,7 +124,7 @@ func (n *NATSyncer) syncEgressIPNATs() error { func (n *NATSyncer) buildPodCache() (podsNetInfo, podsNetInfo, error) { p := func(item *nbdb.LogicalSwitchPort) bool { - return item.ExternalIDs["pod"] == "true" && item.ExternalIDs[ovntypes.NADExternalID] == "" // ignore secondary network LSPs + return item.ExternalIDs["pod"] == "true" && item.ExternalIDs[ovntypes.NADExternalID] == "" // ignore UDN LSPs } lsps, err := libovsdbops.FindLogicalSwitchPortWithPredicate(n.nbClient, p) if err != nil { diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index a6ce3704fb..4c4f63d3fa 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -282,7 +282,7 @@ func (gw *GatewayManager) createGWRouter(l3GatewayConfig *util.L3GatewayConfig, "physical_ips": strings.Join(physicalIPs, ","), } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { maps.Copy(logicalRouterExternalIDs, util.GenerateExternalIDsForSwitchOrRouter(gw.netInfo)) } @@ -344,7 +344,7 @@ func (gw *GatewayManager) createGWRouterPeerPort(nodeName string) error { libovsdbops.RouterPort: gwRouterPortName, }, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { logicalSwitchPort.ExternalIDs = 
map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -408,7 +408,7 @@ func (gw *GatewayManager) createGWRouterPort(hostSubnets []*net.IPNet, gwLRPJoin Networks: gwLRPNetworks, Options: options, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { gwRouterPort.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -465,7 +465,7 @@ func (gw *GatewayManager) updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAdd IPPrefix: entry.String(), Nexthop: drLRPIfAddr.IP.String(), } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { lrsr.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -495,7 +495,7 @@ func (gw *GatewayManager) updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAdd OutputPort: &externalRouterPort, ExternalIDs: map[string]string{util.OvnNodeMasqCIDR: ""}, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { lrsr.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -527,7 +527,7 @@ func (gw *GatewayManager) updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAdd Nexthop: nextHop.String(), OutputPort: &externalRouterPort, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { lrsr.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -559,7 +559,7 @@ func (gw *GatewayManager) updateClusterRouterStaticRoutes(hostSubnets []*net.IPN IPPrefix: gwLRPIP.String(), Nexthop: gwLRPIP.String(), } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { lrsr.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), 
types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -599,7 +599,7 @@ func (gw *GatewayManager) updateClusterRouterStaticRoutes(hostSubnets []*net.IPN } if config.Gateway.Mode != config.GatewayModeLocal { - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { lrsr.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -651,8 +651,8 @@ func (gw *GatewayManager) updateClusterRouterStaticRoutes(hostSubnets []*net.IPN // - DefaultNetworkController.updateNamespace // - EgressIPController.addExternalGWPodSNATOps // - EgressIPController.addPodEgressIPAssignment -// - SecondaryLayer2NetworkController.buildUDNEgressSNAT -// - SecondaryLayer3NetworkController.addUDNNodeSubnetEgressSNAT +// - Layer2UserDefinedNetworkController.buildUDNEgressSNAT +// - Layer3UserDefinedNetworkController.addUDNNodeSubnetEgressSNAT // use gateway config parameters to create SNAT rules on the gateway router, but some of them (not all) don't watch // gateway config changes and rely on the GatewayManager to update their SNAT rules. // Is it racy? Yes! 
@@ -736,7 +736,7 @@ func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []* externalIPs, gwLRPIPs []net.IP, gwRouter *nbdb.LogicalRouter) error { // REMOVEME(trozet) workaround - create join subnet SNAT to handle ICMP needs frag return var extIDs map[string]string - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { extIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -967,7 +967,7 @@ func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, Networks: externalRouterPortNetworks, Name: externalRouterPort, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { externalLogicalRouterPort.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -996,7 +996,7 @@ func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, }, Name: interfaceID, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { externalLogicalSwitchPort.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), @@ -1033,14 +1033,14 @@ func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, Addresses: []string{macAddress}, } - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { externalLogicalSwitchPortToRouter.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } } sw := nbdb.LogicalSwitch{Name: externalSwitch} - if gw.netInfo.IsSecondary() { + if gw.netInfo.IsUserDefinedNetwork() { sw.ExternalIDs = util.GenerateExternalIDsForSwitchOrRouter(gw.netInfo) } @@ -1281,8 +1281,8 @@ func (gw *GatewayManager) staticRouteCleanup(nextHops []net.IP, ipPrefix *net.IP ips.Insert(nextHop.String()) } p := func(item 
*nbdb.LogicalRouterStaticRoute) bool { - networkName, isSecondaryNetwork := item.ExternalIDs[types.NetworkExternalID] - if !isSecondaryNetwork { + networkName, isUserDefinedNetwork := item.ExternalIDs[types.NetworkExternalID] + if !isUserDefinedNetwork { networkName = types.DefaultNetworkName } if networkName != gw.netInfo.GetNetworkName() { @@ -1309,8 +1309,8 @@ func (gw *GatewayManager) policyRouteCleanup(nextHops []net.IP) { for _, nextHop := range nextHops { gwIP := nextHop.String() policyPred := func(item *nbdb.LogicalRouterPolicy) bool { - networkName, isSecondaryNetwork := item.ExternalIDs[types.NetworkExternalID] - if !isSecondaryNetwork { + networkName, isUserDefinedNetwork := item.ExternalIDs[types.NetworkExternalID] + if !isUserDefinedNetwork { networkName = types.DefaultNetworkName } if networkName != gw.netInfo.GetNetworkName() { @@ -1343,8 +1343,8 @@ func (gw *GatewayManager) removeLRPolicies(nodeName string) { managedNetworkName := gw.netInfo.GetNetworkName() p := func(item *nbdb.LogicalRouterPolicy) bool { - networkName, isSecondaryNetwork := item.ExternalIDs[types.NetworkExternalID] - if !isSecondaryNetwork { + networkName, isUserDefinedNetwork := item.ExternalIDs[types.NetworkExternalID] + if !isUserDefinedNetwork { networkName = types.DefaultNetworkName } if networkName != managedNetworkName { diff --git a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go index 4f61101282..c0acb16214 100644 --- a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go +++ b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go @@ -238,7 +238,7 @@ func (pbr *PolicyBasedRoutesManager) createPolicyBasedRoutes(match, priority, ne Nexthops: []string{nexthops}, Action: nbdb.LogicalRouterPolicyActionReroute, } - if pbr.netInfo.IsSecondary() { + if pbr.netInfo.IsUserDefinedNetwork() { lrp.ExternalIDs = map[string]string{ ovntypes.NetworkExternalID: pbr.netInfo.GetNetworkName(), 
ovntypes.TopologyExternalID: pbr.netInfo.TopologyType(), diff --git a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go index 5c2f479607..6eadb8e2ee 100644 --- a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go +++ b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes_test.go @@ -47,7 +47,7 @@ func (n network) generateTestData(nodeName string) []libovsdbtest.TestData { for _, lrp := range n.initialLRPs { lrpUUIDs = append(lrpUUIDs, lrp.UUID) var extID map[string]string - if n.info.IsSecondary() { + if n.info.IsUserDefinedNetwork() { extID = map[string]string{ types.NetworkExternalID: n.info.GetNetworkName(), types.TopologyExternalID: n.info.TopologyType(), diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/layer2_user_defined_network_controller.go similarity index 84% rename from go-controller/pkg/ovn/secondary_layer2_network_controller.go rename to go-controller/pkg/ovn/layer2_user_defined_network_controller.go index 58679c448c..eb7bb05abd 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/layer2_user_defined_network_controller.go @@ -37,15 +37,15 @@ import ( // method/structure shared by all layer 2 network controller, including localnet and layer2 network controllres. 
-type secondaryLayer2NetworkControllerEventHandler struct { +type layer2UserDefinedNetworkControllerEventHandler struct { baseHandler baseNetworkControllerEventHandler watchFactory *factory.WatchFactory objType reflect.Type - oc *SecondaryLayer2NetworkController + oc *Layer2UserDefinedNetworkController syncFunc func([]interface{}) error } -func (h *secondaryLayer2NetworkControllerEventHandler) FilterOutResource(obj interface{}) bool { +func (h *layer2UserDefinedNetworkControllerEventHandler) FilterOutResource(obj interface{}) bool { return h.oc.FilterOutResource(h.objType, obj) } @@ -53,56 +53,56 @@ func (h *secondaryLayer2NetworkControllerEventHandler) FilterOutResource(obj int // type considers them equal and therefore no update is needed. It returns false when the two objects are not considered // equal and an update needs be executed. This is regardless of how the update is carried out (whether with a dedicated update // function or with a delete on the old obj followed by an add on the new obj). -func (h *secondaryLayer2NetworkControllerEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) { +func (h *layer2UserDefinedNetworkControllerEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) { return h.baseHandler.areResourcesEqual(h.objType, obj1, obj2) } // GetInternalCacheEntry returns the internal cache entry for this object, given an object and its type. // This is now used only for pods, which will get their the logical port cache entry. -func (h *secondaryLayer2NetworkControllerEventHandler) GetInternalCacheEntry(obj interface{}) interface{} { - return h.oc.GetInternalCacheEntryForSecondaryNetwork(h.objType, obj) +func (h *layer2UserDefinedNetworkControllerEventHandler) GetInternalCacheEntry(obj interface{}) interface{} { + return h.oc.GetInternalCacheEntryForUserDefinedNetwork(h.objType, obj) } // GetResourceFromInformerCache returns the latest state of the object, given an object key and its type. 
// from the informers cache. -func (h *secondaryLayer2NetworkControllerEventHandler) GetResourceFromInformerCache(key string) (interface{}, error) { +func (h *layer2UserDefinedNetworkControllerEventHandler) GetResourceFromInformerCache(key string) (interface{}, error) { return h.baseHandler.getResourceFromInformerCache(h.objType, h.watchFactory, key) } // RecordAddEvent records the add event on this given object. -func (h *secondaryLayer2NetworkControllerEventHandler) RecordAddEvent(obj interface{}) { +func (h *layer2UserDefinedNetworkControllerEventHandler) RecordAddEvent(obj interface{}) { h.baseHandler.recordAddEvent(h.objType, obj) } // RecordUpdateEvent records the udpate event on this given object. -func (h *secondaryLayer2NetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { +func (h *layer2UserDefinedNetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { h.baseHandler.recordUpdateEvent(h.objType, obj) } // RecordDeleteEvent records the delete event on this given object. -func (h *secondaryLayer2NetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { +func (h *layer2UserDefinedNetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { h.baseHandler.recordDeleteEvent(h.objType, obj) } // RecordSuccessEvent records the success event on this given object. -func (h *secondaryLayer2NetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { +func (h *layer2UserDefinedNetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { h.baseHandler.recordSuccessEvent(h.objType, obj) } // RecordErrorEvent records the error event on this given object. -func (h *secondaryLayer2NetworkControllerEventHandler) RecordErrorEvent(_ interface{}, _ string, _ error) { +func (h *layer2UserDefinedNetworkControllerEventHandler) RecordErrorEvent(_ interface{}, _ string, _ error) { } // IsResourceScheduled returns true if the given object has been scheduled. // Only applied to pods for now. Returns true for all other types. 
-func (h *secondaryLayer2NetworkControllerEventHandler) IsResourceScheduled(obj interface{}) bool { +func (h *layer2UserDefinedNetworkControllerEventHandler) IsResourceScheduled(obj interface{}) bool { return h.baseHandler.isResourceScheduled(h.objType, obj) } // AddResource adds the specified object to the cluster according to its type and returns the error, // if any, yielded during object creation. // Given an object to add and a boolean specifying if the function was executed from iterateRetryResources -func (h *secondaryLayer2NetworkControllerEventHandler) AddResource(obj interface{}, fromRetryLoop bool) error { +func (h *layer2UserDefinedNetworkControllerEventHandler) AddResource(obj interface{}, fromRetryLoop bool) error { switch h.objType { case factory.NodeType: node, ok := obj.(*corev1.Node) @@ -131,14 +131,14 @@ func (h *secondaryLayer2NetworkControllerEventHandler) AddResource(obj interface } return h.oc.addUpdateRemoteNodeEvent(node, config.OVNKubernetesFeature.EnableInterconnect) default: - return h.oc.AddSecondaryNetworkResourceCommon(h.objType, obj) + return h.oc.AddUserDefinedNetworkResourceCommon(h.objType, obj) } } // DeleteResource deletes the object from the cluster according to the delete logic of its resource type. // Given an object and optionally a cachedObj; cachedObj is the internal cache entry for this object, // used for now for pods and network policies. 
-func (h *secondaryLayer2NetworkControllerEventHandler) DeleteResource(obj, cachedObj interface{}) error { +func (h *layer2UserDefinedNetworkControllerEventHandler) DeleteResource(obj, cachedObj interface{}) error { switch h.objType { case factory.NodeType: node, ok := obj.(*corev1.Node) @@ -147,7 +147,7 @@ func (h *secondaryLayer2NetworkControllerEventHandler) DeleteResource(obj, cache } return h.oc.deleteNodeEvent(node) default: - return h.oc.DeleteSecondaryNetworkResourceCommon(h.objType, obj, cachedObj) + return h.oc.DeleteUserDefinedNetworkResourceCommon(h.objType, obj, cachedObj) } } @@ -155,7 +155,7 @@ func (h *secondaryLayer2NetworkControllerEventHandler) DeleteResource(obj, cache // type and returns the error, if any, yielded during the object update. // Given an old and a new object; The inRetryCache boolean argument is to indicate if the given resource // is in the retryCache or not. -func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache bool) error { +func (h *layer2UserDefinedNetworkControllerEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache bool) error { switch h.objType { case factory.NodeType: newNode, ok := newObj.(*corev1.Node) @@ -205,7 +205,7 @@ func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, ne case factory.PodType: newPod := newObj.(*corev1.Pod) oldPod := oldObj.(*corev1.Pod) - if err := h.oc.ensurePodForSecondaryNetwork(newPod, shouldAddPort(oldPod, newPod, inRetryCache)); err != nil { + if err := h.oc.ensurePodForUserDefinedNetwork(newPod, shouldAddPort(oldPod, newPod, inRetryCache)); err != nil { return err } @@ -214,11 +214,11 @@ func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, ne } return nil default: - return h.oc.UpdateSecondaryNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) + return h.oc.UpdateUserDefinedNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) } } -func (h 
*secondaryLayer2NetworkControllerEventHandler) SyncFunc(objs []interface{}) error { +func (h *layer2UserDefinedNetworkControllerEventHandler) SyncFunc(objs []interface{}) error { var syncFunc func([]interface{}) error if h.syncFunc != nil { @@ -230,7 +230,7 @@ func (h *secondaryLayer2NetworkControllerEventHandler) SyncFunc(objs []interface syncFunc = h.oc.syncNodes case factory.PodType: - syncFunc = h.oc.syncPodsForSecondaryNetwork + syncFunc = h.oc.syncPodsForUserDefinedNetwork case factory.NamespaceType: syncFunc = h.oc.syncNamespaces @@ -256,14 +256,14 @@ func (h *secondaryLayer2NetworkControllerEventHandler) SyncFunc(objs []interface // IsObjectInTerminalState returns true if the given object is a in terminal state. // This is used now for pods that are either in a PodSucceeded or in a PodFailed state. -func (h *secondaryLayer2NetworkControllerEventHandler) IsObjectInTerminalState(obj interface{}) bool { +func (h *layer2UserDefinedNetworkControllerEventHandler) IsObjectInTerminalState(obj interface{}) bool { return h.baseHandler.isObjectInTerminalState(h.objType, obj) } -// SecondaryLayer2NetworkController is created for logical network infrastructure and policy -// for a secondary layer2 network -type SecondaryLayer2NetworkController struct { - BaseSecondaryLayer2NetworkController +// Layer2UserDefinedNetworkController is created for logical network infrastructure and policy +// for a layer2 UDN +type Layer2UserDefinedNetworkController struct { + BaseLayer2UserDefinedNetworkController // Node-specific syncMaps used by node event handler mgmtPortFailed sync.Map @@ -298,14 +298,14 @@ type SecondaryLayer2NetworkController struct { defaultGatewayReconciler *kubevirt.DefaultGatewayReconciler } -// NewSecondaryLayer2NetworkController create a new OVN controller for the given secondary layer2 nad -func NewSecondaryLayer2NetworkController( +// NewLayer2UserDefinedNetworkController create a new OVN controller for the given layer2 NAD +func 
NewLayer2UserDefinedNetworkController( cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, networkManager networkmanager.Interface, routeImportManager routeimport.Manager, portCache *PortCache, - eIPController *EgressIPController) (*SecondaryLayer2NetworkController, error) { + eIPController *EgressIPController) (*Layer2UserDefinedNetworkController, error) { stopChan := make(chan struct{}) @@ -327,10 +327,10 @@ func NewSecondaryLayer2NetworkController( lsManager = lsm.NewL2SwitchManagerForUserDefinedPrimaryNetwork(gatewayIPs, mgmtIPs) } - oc := &SecondaryLayer2NetworkController{ - BaseSecondaryLayer2NetworkController: BaseSecondaryLayer2NetworkController{ + oc := &Layer2UserDefinedNetworkController{ + BaseLayer2UserDefinedNetworkController: BaseLayer2UserDefinedNetworkController{ - BaseSecondaryNetworkController: BaseSecondaryNetworkController{ + BaseUserDefinedNetworkController: BaseUserDefinedNetworkController{ BaseNetworkController: BaseNetworkController{ CommonNetworkControllerInfo: *cnci, controllerName: getNetworkControllerName(netInfo.GetNetworkName()), @@ -405,13 +405,13 @@ func NewSecondaryLayer2NetworkController( return oc, nil } -// Start starts the secondary layer2 controller, handles all events and creates all needed logical entities -func (oc *SecondaryLayer2NetworkController) Start(_ context.Context) error { - klog.Infof("Starting controller for secondary network %s", oc.GetNetworkName()) +// Start starts the layer2 UDN controller, handles all events and creates all needed logical entities +func (oc *Layer2UserDefinedNetworkController) Start(_ context.Context) error { + klog.Infof("Starting controller for UDN %s", oc.GetNetworkName()) start := time.Now() defer func() { - klog.Infof("Starting controller for secondary network %s took %v", oc.GetNetworkName(), time.Since(start)) + klog.Infof("Starting controller for UDN %s took %v", oc.GetNetworkName(), time.Since(start)) }() if err := oc.init(); err != nil { @@ -421,8 +421,8 @@ func (oc 
*SecondaryLayer2NetworkController) Start(_ context.Context) error { return oc.run() } -func (oc *SecondaryLayer2NetworkController) run() error { - err := oc.BaseSecondaryLayer2NetworkController.run() +func (oc *Layer2UserDefinedNetworkController) run() error { + err := oc.BaseLayer2UserDefinedNetworkController.run() if err != nil { return err } @@ -442,9 +442,9 @@ func (oc *SecondaryLayer2NetworkController) run() error { // Cleanup cleans up logical entities for the given network, called from net-attach-def routine // could be called from a dummy Controller (only has CommonNetworkControllerInfo set) -func (oc *SecondaryLayer2NetworkController) Cleanup() error { +func (oc *Layer2UserDefinedNetworkController) Cleanup() error { networkName := oc.GetNetworkName() - if err := oc.BaseSecondaryLayer2NetworkController.cleanup(); err != nil { + if err := oc.BaseLayer2UserDefinedNetworkController.cleanup(); err != nil { return fmt.Errorf("failed to cleanup network %q: %w", networkName, err) } @@ -476,7 +476,7 @@ func (oc *SecondaryLayer2NetworkController) Cleanup() error { return nil } -func (oc *SecondaryLayer2NetworkController) init() error { +func (oc *Layer2UserDefinedNetworkController) init() error { // Create default Control Plane Protection (COPP) entry for routers defaultCOPPUUID, err := EnsureDefaultCOPP(oc.nbClient) if err != nil { @@ -520,19 +520,19 @@ func (oc *SecondaryLayer2NetworkController) init() error { return err } -func (oc *SecondaryLayer2NetworkController) Stop() { - klog.Infof("Stoping controller for secondary network %s", oc.GetNetworkName()) - oc.BaseSecondaryLayer2NetworkController.stop() +func (oc *Layer2UserDefinedNetworkController) Stop() { + klog.Infof("Stoping controller for UDN %s", oc.GetNetworkName()) + oc.BaseLayer2UserDefinedNetworkController.stop() } -func (oc *SecondaryLayer2NetworkController) Reconcile(netInfo util.NetInfo) error { +func (oc *Layer2UserDefinedNetworkController) Reconcile(netInfo util.NetInfo) error { return 
oc.BaseNetworkController.reconcile( netInfo, func(node string) { oc.gatewaysFailed.Store(node, true) }, ) } -func (oc *SecondaryLayer2NetworkController) initRetryFramework() { +func (oc *Layer2UserDefinedNetworkController) initRetryFramework() { oc.retryNodes = oc.newRetryFramework(factory.NodeType) oc.retryPods = oc.newRetryFramework(factory.PodType) if oc.allocatesPodAnnotation() && oc.AllowsPersistentIPs() { @@ -556,9 +556,9 @@ func (oc *SecondaryLayer2NetworkController) initRetryFramework() { } // newRetryFramework builds and returns a retry framework for the input resource type; -func (oc *SecondaryLayer2NetworkController) newRetryFramework( +func (oc *Layer2UserDefinedNetworkController) newRetryFramework( objectType reflect.Type) *retry.RetryFramework { - eventHandler := &secondaryLayer2NetworkControllerEventHandler{ + eventHandler := &layer2UserDefinedNetworkControllerEventHandler{ baseHandler: baseNetworkControllerEventHandler{}, objType: objectType, watchFactory: oc.watchFactory, @@ -579,7 +579,7 @@ func (oc *SecondaryLayer2NetworkController) newRetryFramework( ) } -func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1.Node, nSyncs *nodeSyncs) error { +func (oc *Layer2UserDefinedNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, nSyncs *nodeSyncs) error { var errs []error if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { @@ -659,7 +659,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 } } - errs = append(errs, oc.BaseSecondaryLayer2NetworkController.addUpdateLocalNodeEvent(node)) + errs = append(errs, oc.BaseLayer2UserDefinedNetworkController.addUpdateLocalNodeEvent(node)) err := utilerrors.Join(errs...) 
if err != nil { @@ -668,7 +668,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 return err } -func (oc *SecondaryLayer2NetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, syncZoneIC bool) error { +func (oc *Layer2UserDefinedNetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, syncZoneIC bool) error { var errs []error if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { @@ -683,7 +683,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateRemoteNodeEvent(node *corev } } - errs = append(errs, oc.BaseSecondaryLayer2NetworkController.addUpdateRemoteNodeEvent(node)) + errs = append(errs, oc.BaseLayer2UserDefinedNetworkController.addUpdateRemoteNodeEvent(node)) err := utilerrors.Join(errs...) if err != nil { @@ -692,7 +692,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateRemoteNodeEvent(node *corev return err } -func (oc *SecondaryLayer2NetworkController) addPortForRemoteNodeGR(node *corev1.Node) error { +func (oc *Layer2UserDefinedNetworkController) addPortForRemoteNodeGR(node *corev1.Node) error { nodeJoinSubnetIPs, err := udn.GetGWRouterIPs(node, oc.GetNetInfo()) if err != nil { if util.IsAnnotationNotSetError(err) { @@ -745,7 +745,7 @@ func (oc *SecondaryLayer2NetworkController) addPortForRemoteNodeGR(node *corev1. return nil } -func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) error { +func (oc *Layer2UserDefinedNetworkController) deleteNodeEvent(node *corev1.Node) error { if err := oc.gatewayManagerForNode(node.Name).Cleanup(); err != nil { return fmt.Errorf("failed to cleanup gateway on node %q: %w", node.Name, err) } @@ -770,7 +770,7 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e // If isUDNAdvertised is true, then we want to SNAT all packets that are coming from pods on this network // leaving towards nodeIPs on the cluster to masqueradeIP. 
If network is advertise then the SNAT looks like this: // "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" "169.254.0.36" "93.93.0.0/16" -func (oc *SecondaryLayer2NetworkController) addOrUpdateUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, gwRouterName string, isUDNAdvertised bool) error { +func (oc *Layer2UserDefinedNetworkController) addOrUpdateUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, gwRouterName string, isUDNAdvertised bool) error { outputPort := types.GWRouterToJoinSwitchPrefix + gwRouterName nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, isUDNAdvertised) if err != nil { @@ -789,7 +789,7 @@ func (oc *SecondaryLayer2NetworkController) addOrUpdateUDNClusterSubnetEgressSNA return nil } -func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { +func (oc *Layer2UserDefinedNetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -840,7 +840,7 @@ func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) }, nil } -func (oc *SecondaryLayer2NetworkController) newGatewayManager(nodeName string) *GatewayManager { +func (oc *Layer2UserDefinedNetworkController) newGatewayManager(nodeName string) *GatewayManager { return NewGatewayManagerForLayer2Topology( nodeName, oc.defaultCOPPUUID, @@ -852,7 +852,7 @@ func (oc *SecondaryLayer2NetworkController) newGatewayManager(nodeName string) * ) } -func (oc *SecondaryLayer2NetworkController) gatewayManagerForNode(nodeName string) *GatewayManager { +func (oc *Layer2UserDefinedNetworkController) gatewayManagerForNode(nodeName string) *GatewayManager { obj, isFound := oc.gatewayManagers.Load(nodeName) if !isFound { return oc.newGatewayManager(nodeName) @@ -870,7 +870,7 @@ func (oc 
*SecondaryLayer2NetworkController) gatewayManagerForNode(nodeName strin } } -func (oc *SecondaryLayer2NetworkController) gatewayOptions() []GatewayOption { +func (oc *Layer2UserDefinedNetworkController) gatewayOptions() []GatewayOption { var opts []GatewayOption if oc.clusterLoadBalancerGroupUUID != "" { opts = append(opts, WithLoadBalancerGroups( @@ -882,7 +882,7 @@ func (oc *SecondaryLayer2NetworkController) gatewayOptions() []GatewayOption { return opts } -func (oc *SecondaryLayer2NetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { +func (oc *Layer2UserDefinedNetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { useLBGroups := oc.clusterLoadBalancerGroupUUID != "" // use 5 workers like most of the kubernetes controllers in the kubernetes controller-manager // do not use LB templates for UDNs - OVN bug https://issues.redhat.com/browse/FDP-988 @@ -893,7 +893,7 @@ func (oc *SecondaryLayer2NetworkController) StartServiceController(wg *sync.Wait return nil } -func (oc *SecondaryLayer2NetworkController) updateLocalPodEvent(pod *corev1.Pod) error { +func (oc *Layer2UserDefinedNetworkController) updateLocalPodEvent(pod *corev1.Pod) error { if kubevirt.IsPodAllowedForMigration(pod, oc.GetNetInfo()) { kubevirtLiveMigrationStatus, err := kubevirt.DiscoverLiveMigrationStatus(oc.watchFactory, pod) if err != nil { @@ -908,7 +908,7 @@ func (oc *SecondaryLayer2NetworkController) updateLocalPodEvent(pod *corev1.Pod) return nil } -func (oc *SecondaryLayer2NetworkController) reconcileLiveMigrationTargetZone(kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus) error { +func (oc *Layer2UserDefinedNetworkController) reconcileLiveMigrationTargetZone(kubevirtLiveMigrationStatus *kubevirt.LiveMigrationStatus) error { if oc.defaultGatewayReconciler == nil { return nil } diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go b/go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go 
similarity index 90% rename from go-controller/pkg/ovn/secondary_layer2_network_controller_test.go rename to go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go index 1079a14198..352ef1497f 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go +++ b/go-controller/pkg/ovn/layer2_user_defined_network_controller_test.go @@ -81,7 +81,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { DescribeTable( "reconciles a new", - func(netInfo secondaryNetInfo, testConfig testConfiguration, gatewayMode config.GatewayMode) { + func(netInfo userDefinedNetInfo, testConfig testConfiguration, gatewayMode config.GatewayMode) { const podIdx = 0 podInfo := dummyL2TestPod(ns, netInfo, podIdx, podIdx) setupConfig(netInfo, testConfig, gatewayMode) @@ -90,7 +90,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { const nodeIPv4CIDR = "192.168.126.202/24" By(fmt.Sprintf("Creating a node named %q, with IP: %s", nodeName, nodeIPv4CIDR)) - testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR) + testNode, err := newNodeWithUserDefinedNetworks(nodeName, nodeIPv4CIDR) Expect(err).NotTo(HaveOccurred()) Expect(setupFakeOvnForLayer2Topology(fakeOvn, initialDB, netInfo, testNode, podInfo, pod)).To(Succeed()) @@ -122,7 +122,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { By("asserting the OVN entities provisioned in the NBDB are the expected ones") Eventually(fakeOvn.nbClient).Should( libovsdbtest.HaveData( - newSecondaryNetworkExpectationMachine( + newUserDefinedNetworkExpectationMachine( fakeOvn, []testPod{podInfo}, expectationOptions..., @@ -181,7 +181,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { DescribeTable( "reconciles a new kubevirt-related pod during its live-migration phases", - func(netInfo secondaryNetInfo, testConfig testConfiguration, migrationInfo *liveMigrationInfo) { + func(netInfo 
userDefinedNetInfo, testConfig testConfiguration, migrationInfo *liveMigrationInfo) { ipamClaim := ipamclaimsapi.IPAMClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, @@ -196,11 +196,11 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { netInfo.ipamClaimReference = ipamClaim.Name const ( - sourcePodInfoIdx = 0 - targetPodInfoIdx = 1 - secondaryNetworkIdx = 0 + sourcePodInfoIdx = 0 + targetPodInfoIdx = 1 + userDefinedNetworkIdx = 0 ) - sourcePodInfo := dummyL2TestPod(ns, netInfo, sourcePodInfoIdx, secondaryNetworkIdx) + sourcePodInfo := dummyL2TestPod(ns, netInfo, sourcePodInfoIdx, userDefinedNetworkIdx) setupConfig(netInfo, testConfig, config.GatewayModeShared) app.Action = func(*cli.Context) error { sourcePod := newMultiHomedKubevirtPod( @@ -211,7 +211,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { const nodeIPv4CIDR = "192.168.126.202/24" By(fmt.Sprintf("Creating a node named %q, with IP: %s", nodeName, nodeIPv4CIDR)) - testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR) + testNode, err := newNodeWithUserDefinedNetworks(nodeName, nodeIPv4CIDR) Expect(err).NotTo(HaveOccurred()) Expect(setupFakeOvnForLayer2Topology(fakeOvn, initialDB, netInfo, testNode, sourcePodInfo, sourcePod, @@ -245,13 +245,13 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { By("asserting the OVN entities provisioned in the NBDB are the expected ones before migration started") Eventually(fakeOvn.nbClient).Should( libovsdbtest.HaveData( - newSecondaryNetworkExpectationMachine( + newUserDefinedNetworkExpectationMachine( fakeOvn, []testPod{sourcePodInfo}, expectationOptions..., ).expectedLogicalSwitchesAndPorts(netInfo.isPrimary)...)) - targetPodInfo := dummyL2TestPod(ns, netInfo, targetPodInfoIdx, secondaryNetworkIdx) + targetPodInfo := dummyL2TestPod(ns, netInfo, targetPodInfoIdx, userDefinedNetworkIdx) targetKvPod := newMultiHomedKubevirtPod( migrationInfo.vmName, 
migrationInfo.targetPodInfo, @@ -272,7 +272,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { } Eventually(fakeOvn.nbClient).Should( libovsdbtest.HaveData( - newSecondaryNetworkExpectationMachine( + newUserDefinedNetworkExpectationMachine( fakeOvn, testPods, expectationOptions..., @@ -345,8 +345,8 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { ) DescribeTable( - "secondary network controller DB entities are properly cleaned up", - func(netInfo secondaryNetInfo, testConfig testConfiguration) { + "user-defined network controller DB entities are properly cleaned up", + func(netInfo userDefinedNetInfo, testConfig testConfiguration) { podInfo := dummyTestPod(ns, netInfo) if testConfig.configToOverride != nil { config.OVNKubernetesFeature = *testConfig.configToOverride @@ -370,10 +370,10 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { *netConf, ) Expect(err).NotTo(HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} const nodeIPv4CIDR = "192.168.126.202/24" - testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR) + testNode, err := newNodeWithUserDefinedNetworks(nodeName, nodeIPv4CIDR) Expect(err).NotTo(HaveOccurred()) gwConfig, err := util.ParseNodeL3GatewayAnnotation(testNode) @@ -429,23 +429,23 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { Expect(fakeOvn.networkManager.Start()).To(Succeed()) defer fakeOvn.networkManager.Stop() - secondaryNetController, ok := fakeOvn.secondaryControllers[secondaryNetworkName] + udnNetController, ok := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] Expect(ok).To(BeTrue()) - fullSecondaryController, ok := fakeOvn.fullSecondaryL2Controllers[secondaryNetworkName] + fullUDNController, ok := 
fakeOvn.fullL2UDNControllers[userDefinedNetworkName] Expect(ok).To(BeTrue()) - err = fullSecondaryController.init() + err = fullUDNController.init() Expect(err).NotTo(HaveOccurred()) - secondaryNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() - podInfo.populateSecondaryNetworkLogicalSwitchCache(secondaryNetController) - Expect(secondaryNetController.bnc.WatchNodes()).To(Succeed()) - Expect(secondaryNetController.bnc.WatchPods()).To(Succeed()) + udnNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() + podInfo.populateUserDefinedNetworkLogicalSwitchCache(udnNetController) + Expect(udnNetController.bnc.WatchNodes()).To(Succeed()) + Expect(udnNetController.bnc.WatchPods()).To(Succeed()) Expect(fakeOvn.fakeClient.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})).To(Succeed()) Expect(fakeOvn.fakeClient.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(nad.Namespace).Delete(context.Background(), nad.Name, metav1.DeleteOptions{})).To(Succeed()) - err = fullSecondaryController.Cleanup() + err = fullUDNController.Cleanup() Expect(err).NotTo(HaveOccurred()) Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(generateUDNPostInitDB([]libovsdbtest.TestData{nbZone}))) @@ -471,22 +471,22 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 2 network", func() { }) -func dummySecondaryLayer2UserDefinedNetwork(subnets string) secondaryNetInfo { - return secondaryNetInfo{ - netName: secondaryNetworkName, +func dummySecondaryLayer2UserDefinedNetwork(subnets string) userDefinedNetInfo { + return userDefinedNetInfo{ + netName: userDefinedNetworkName, nadName: namespacedName(ns, nadName), topology: ovntypes.Layer2Topology, clustersubnets: subnets, } } -func dummyPrimaryLayer2UserDefinedNetwork(subnets string) secondaryNetInfo { - secondaryNet := dummySecondaryLayer2UserDefinedNetwork(subnets) - secondaryNet.isPrimary = true - return secondaryNet +func 
dummyPrimaryLayer2UserDefinedNetwork(subnets string) userDefinedNetInfo { + udnNetInfo := dummySecondaryLayer2UserDefinedNetwork(subnets) + udnNetInfo.isPrimary = true + return udnNetInfo } -func dummyL2TestPod(nsName string, info secondaryNetInfo, podIdx, secondaryNetIdx int) testPod { +func dummyL2TestPod(nsName string, info userDefinedNetInfo, podIdx, udnNetIdx int) testPod { const nodeSubnet = "10.128.1.0/24" if info.isPrimary { @@ -509,8 +509,8 @@ func dummyL2TestPod(nsName string, info secondaryNetInfo, podIdx, secondaryNetId info.clustersubnets, "", "100.200.0.1", - fmt.Sprintf("100.200.0.%d/16", secondaryNetIdx+3), - fmt.Sprintf("0a:58:64:c8:00:%0.2d", secondaryNetIdx+3), + fmt.Sprintf("100.200.0.%d/16", udnNetIdx+3), + fmt.Sprintf("0a:58:64:c8:00:%0.2d", udnNetIdx+3), "primary", 0, []util.PodRoute{ @@ -533,8 +533,8 @@ func dummyL2TestPod(nsName string, info secondaryNetInfo, podIdx, secondaryNetId info.clustersubnets, "", "", - fmt.Sprintf("100.200.0.%d/16", secondaryNetIdx+1), - fmt.Sprintf("0a:58:64:c8:00:%0.2d", secondaryNetIdx+1), + fmt.Sprintf("100.200.0.%d/16", udnNetIdx+1), + fmt.Sprintf("0a:58:64:c8:00:%0.2d", udnNetIdx+1), "secondary", 0, []util.PodRoute{}, @@ -650,16 +650,16 @@ func ipv4DefaultRoute() *net.IPNet { } } -func dummyLayer2SecondaryUserDefinedNetwork(subnets string) secondaryNetInfo { - return secondaryNetInfo{ - netName: secondaryNetworkName, +func dummyLayer2SecondaryUserDefinedNetwork(subnets string) userDefinedNetInfo { + return userDefinedNetInfo{ + netName: userDefinedNetworkName, nadName: namespacedName(ns, nadName), topology: ovntypes.Layer2Topology, clustersubnets: subnets, } } -func dummyLayer2PrimaryUserDefinedNetwork(subnets string) secondaryNetInfo { +func dummyLayer2PrimaryUserDefinedNetwork(subnets string) userDefinedNetInfo { secondaryNet := dummyLayer2SecondaryUserDefinedNetwork(subnets) secondaryNet.isPrimary = true return secondaryNet @@ -679,7 +679,7 @@ func nodeCIDR() *net.IPNet { } } -func 
setupFakeOvnForLayer2Topology(fakeOvn *FakeOVN, initialDB libovsdbtest.TestSetup, netInfo secondaryNetInfo, testNode *corev1.Node, podInfo testPod, pod *corev1.Pod, extraObjects ...runtime.Object) error { +func setupFakeOvnForLayer2Topology(fakeOvn *FakeOVN, initialDB libovsdbtest.TestSetup, netInfo userDefinedNetInfo, testNode *corev1.Node, podInfo testPod, pod *corev1.Pod, extraObjects ...runtime.Object) error { By(fmt.Sprintf("creating a network attachment definition for network: %s", netInfo.netName)) nad, err := newNetworkAttachmentDefinition( ns, @@ -687,7 +687,7 @@ func setupFakeOvnForLayer2Topology(fakeOvn *FakeOVN, initialDB libovsdbtest.Test *netInfo.netconf(), ) Expect(err).NotTo(HaveOccurred()) - nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{ovntypes.OvnNetworkIDAnnotation: userDefinedNetworkID} By("setting up the OVN DB without any entities in it") Expect(netInfo.setupOVNDependencies(&initialDB)).To(Succeed()) @@ -755,24 +755,24 @@ func setupFakeOvnForLayer2Topology(fakeOvn *FakeOVN, initialDB libovsdbtest.Test return err } By("asserting the pod (once reconciled) *features* the OVN pod networks annotation") - secondaryNetController, doesControllerExist := fakeOvn.secondaryControllers[secondaryNetworkName] + userDefinedNetController, doesControllerExist := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] if !doesControllerExist { - return fmt.Errorf("expected secondary network controller to exist") + return fmt.Errorf("expected user-defined network controller to exist") } - secondaryNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() - podInfo.populateSecondaryNetworkLogicalSwitchCache(secondaryNetController) - if err = secondaryNetController.bnc.WatchNodes(); err != nil { + userDefinedNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() + podInfo.populateUserDefinedNetworkLogicalSwitchCache(userDefinedNetController) + if err = 
userDefinedNetController.bnc.WatchNodes(); err != nil { return err } - if err = secondaryNetController.bnc.WatchPods(); err != nil { + if err = userDefinedNetController.bnc.WatchPods(); err != nil { return err } return nil } -func setupConfig(netInfo secondaryNetInfo, testConfig testConfiguration, gatewayMode config.GatewayMode) { +func setupConfig(netInfo userDefinedNetInfo, testConfig testConfiguration, gatewayMode config.GatewayMode) { if testConfig.configToOverride != nil { config.OVNKubernetesFeature = *testConfig.configToOverride if testConfig.gatewayConfig != nil { diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/layer3_user_defined_network_controller.go similarity index 88% rename from go-controller/pkg/ovn/secondary_layer3_network_controller.go rename to go-controller/pkg/ovn/layer3_user_defined_network_controller.go index da57187694..815a8b4c9c 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/layer3_user_defined_network_controller.go @@ -35,15 +35,15 @@ import ( utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) -type secondaryLayer3NetworkControllerEventHandler struct { +type Layer3UserDefinedNetworkControllerEventHandler struct { baseHandler baseNetworkControllerEventHandler watchFactory *factory.WatchFactory objType reflect.Type - oc *SecondaryLayer3NetworkController + oc *Layer3UserDefinedNetworkController syncFunc func([]interface{}) error } -func (h *secondaryLayer3NetworkControllerEventHandler) FilterOutResource(obj interface{}) bool { +func (h *Layer3UserDefinedNetworkControllerEventHandler) FilterOutResource(obj interface{}) bool { return h.oc.FilterOutResource(h.objType, obj) } @@ -51,56 +51,56 @@ func (h *secondaryLayer3NetworkControllerEventHandler) FilterOutResource(obj int // type considers them equal and therefore no update is needed. 
It returns false when the two objects are not considered // equal and an update needs be executed. This is regardless of how the update is carried out (whether with a dedicated update // function or with a delete on the old obj followed by an add on the new obj). -func (h *secondaryLayer3NetworkControllerEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) { return h.baseHandler.areResourcesEqual(h.objType, obj1, obj2) } // GetInternalCacheEntry returns the internal cache entry for this object, given an object and its type. // This is now used only for pods, which will get their the logical port cache entry. -func (h *secondaryLayer3NetworkControllerEventHandler) GetInternalCacheEntry(obj interface{}) interface{} { - return h.oc.GetInternalCacheEntryForSecondaryNetwork(h.objType, obj) +func (h *Layer3UserDefinedNetworkControllerEventHandler) GetInternalCacheEntry(obj interface{}) interface{} { + return h.oc.GetInternalCacheEntryForUserDefinedNetwork(h.objType, obj) } // GetResourceFromInformerCache returns the latest state of the object, given an object key and its type. // from the informers cache. -func (h *secondaryLayer3NetworkControllerEventHandler) GetResourceFromInformerCache(key string) (interface{}, error) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) GetResourceFromInformerCache(key string) (interface{}, error) { return h.baseHandler.getResourceFromInformerCache(h.objType, h.watchFactory, key) } // RecordAddEvent records the add event on this given object. -func (h *secondaryLayer3NetworkControllerEventHandler) RecordAddEvent(obj interface{}) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) RecordAddEvent(obj interface{}) { h.baseHandler.recordAddEvent(h.objType, obj) } // RecordUpdateEvent records the udpate event on this given object. 
-func (h *secondaryLayer3NetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { h.baseHandler.recordUpdateEvent(h.objType, obj) } // RecordDeleteEvent records the delete event on this given object. -func (h *secondaryLayer3NetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { h.baseHandler.recordDeleteEvent(h.objType, obj) } // RecordSuccessEvent records the success event on this given object. -func (h *secondaryLayer3NetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { h.baseHandler.recordSuccessEvent(h.objType, obj) } // RecordErrorEvent records the error event on this given object. -func (h *secondaryLayer3NetworkControllerEventHandler) RecordErrorEvent(_ interface{}, _ string, _ error) { +func (h *Layer3UserDefinedNetworkControllerEventHandler) RecordErrorEvent(_ interface{}, _ string, _ error) { } // IsResourceScheduled returns true if the given object has been scheduled. // Only applied to pods for now. Returns true for all other types. -func (h *secondaryLayer3NetworkControllerEventHandler) IsResourceScheduled(obj interface{}) bool { +func (h *Layer3UserDefinedNetworkControllerEventHandler) IsResourceScheduled(obj interface{}) bool { return h.baseHandler.isResourceScheduled(h.objType, obj) } // AddResource adds the specified object to the cluster according to its type and returns the error, // if any, yielded during object creation. 
// Given an object to add and a boolean specifying if the function was executed from iterateRetryResources -func (h *secondaryLayer3NetworkControllerEventHandler) AddResource(obj interface{}, fromRetryLoop bool) error { +func (h *Layer3UserDefinedNetworkControllerEventHandler) AddResource(obj interface{}, fromRetryLoop bool) error { switch h.objType { case factory.NodeType: node, ok := obj.(*corev1.Node) @@ -145,7 +145,7 @@ func (h *secondaryLayer3NetworkControllerEventHandler) AddResource(obj interface } } default: - return h.oc.AddSecondaryNetworkResourceCommon(h.objType, obj) + return h.oc.AddUserDefinedNetworkResourceCommon(h.objType, obj) } return nil } @@ -154,7 +154,7 @@ func (h *secondaryLayer3NetworkControllerEventHandler) AddResource(obj interface // type and returns the error, if any, yielded during the object update. // Given an old and a new object; The inRetryCache boolean argument is to indicate if the given resource // is in the retryCache or not. -func (h *secondaryLayer3NetworkControllerEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache bool) error { +func (h *Layer3UserDefinedNetworkControllerEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache bool) error { switch h.objType { case factory.NodeType: newNode, ok := newObj.(*corev1.Node) @@ -223,14 +223,14 @@ func (h *secondaryLayer3NetworkControllerEventHandler) UpdateResource(oldObj, ne return h.oc.addUpdateRemoteNodeEvent(newNode, syncZoneIC) } default: - return h.oc.UpdateSecondaryNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) + return h.oc.UpdateUserDefinedNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) } } // DeleteResource deletes the object from the cluster according to the delete logic of its resource type. // Given an object and optionally a cachedObj; cachedObj is the internal cache entry for this object, // used for now for pods and network policies. 
-func (h *secondaryLayer3NetworkControllerEventHandler) DeleteResource(obj, cachedObj interface{}) error { +func (h *Layer3UserDefinedNetworkControllerEventHandler) DeleteResource(obj, cachedObj interface{}) error { switch h.objType { case factory.NodeType: node, ok := obj.(*corev1.Node) @@ -240,11 +240,11 @@ func (h *secondaryLayer3NetworkControllerEventHandler) DeleteResource(obj, cache return h.oc.deleteNodeEvent(node) default: - return h.oc.DeleteSecondaryNetworkResourceCommon(h.objType, obj, cachedObj) + return h.oc.DeleteUserDefinedNetworkResourceCommon(h.objType, obj, cachedObj) } } -func (h *secondaryLayer3NetworkControllerEventHandler) SyncFunc(objs []interface{}) error { +func (h *Layer3UserDefinedNetworkControllerEventHandler) SyncFunc(objs []interface{}) error { var syncFunc func([]interface{}) error if h.syncFunc != nil { @@ -253,7 +253,7 @@ func (h *secondaryLayer3NetworkControllerEventHandler) SyncFunc(objs []interface } else { switch h.objType { case factory.PodType: - syncFunc = h.oc.syncPodsForSecondaryNetwork + syncFunc = h.oc.syncPodsForUserDefinedNetwork case factory.NodeType: syncFunc = h.oc.syncNodes @@ -279,14 +279,14 @@ func (h *secondaryLayer3NetworkControllerEventHandler) SyncFunc(objs []interface // IsObjectInTerminalState returns true if the given object is a in terminal state. // This is used now for pods that are either in a PodSucceeded or in a PodFailed state. 
-func (h *secondaryLayer3NetworkControllerEventHandler) IsObjectInTerminalState(obj interface{}) bool { +func (h *Layer3UserDefinedNetworkControllerEventHandler) IsObjectInTerminalState(obj interface{}) bool { return h.baseHandler.isObjectInTerminalState(h.objType, obj) } -// SecondaryLayer3NetworkController is created for logical network infrastructure and policy -// for a secondary l3 network -type SecondaryLayer3NetworkController struct { - BaseSecondaryNetworkController +// Layer3UserDefinedNetworkController is created for logical network infrastructure and policy +// for a layer3 UDN +type Layer3UserDefinedNetworkController struct { + BaseUserDefinedNetworkController // Node-specific syncMaps used by node event handler mgmtPortFailed sync.Map @@ -321,23 +321,23 @@ type SecondaryLayer3NetworkController struct { eIPController *EgressIPController } -// NewSecondaryLayer3NetworkController create a new OVN controller for the given secondary layer3 NAD -func NewSecondaryLayer3NetworkController( +// NewLayer3UserDefinedNetworkController creates a new OVN controller for the given layer3 NAD +func NewLayer3UserDefinedNetworkController( cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, networkManager networkmanager.Interface, routeImportManager routeimport.Manager, eIPController *EgressIPController, portCache *PortCache, -) (*SecondaryLayer3NetworkController, error) { +) (*Layer3UserDefinedNetworkController, error) { stopChan := make(chan struct{}) ipv4Mode, ipv6Mode := netInfo.IPMode() addressSetFactory := addressset.NewOvnAddressSetFactory(cnci.nbClient, ipv4Mode, ipv6Mode) - oc := &SecondaryLayer3NetworkController{ - BaseSecondaryNetworkController: BaseSecondaryNetworkController{ + oc := &Layer3UserDefinedNetworkController{ + BaseUserDefinedNetworkController: BaseUserDefinedNetworkController{ BaseNetworkController: BaseNetworkController{ CommonNetworkControllerInfo: *cnci, controllerName: getNetworkControllerName(netInfo.GetNetworkName()), @@ -405,7 +405,7 @@ func 
NewSecondaryLayer3NetworkController( return oc, nil } -func (oc *SecondaryLayer3NetworkController) initRetryFramework() { +func (oc *Layer3UserDefinedNetworkController) initRetryFramework() { oc.retryPods = oc.newRetryFramework(factory.PodType) oc.retryNodes = oc.newRetryFramework(factory.NodeType) @@ -426,9 +426,9 @@ func (oc *SecondaryLayer3NetworkController) initRetryFramework() { } // newRetryFramework builds and returns a retry framework for the input resource type; -func (oc *SecondaryLayer3NetworkController) newRetryFramework( +func (oc *Layer3UserDefinedNetworkController) newRetryFramework( objectType reflect.Type) *retry.RetryFramework { - eventHandler := &secondaryLayer3NetworkControllerEventHandler{ + eventHandler := &Layer3UserDefinedNetworkControllerEventHandler{ baseHandler: baseNetworkControllerEventHandler{}, objType: objectType, watchFactory: oc.watchFactory, @@ -449,9 +449,9 @@ func (oc *SecondaryLayer3NetworkController) newRetryFramework( ) } -// Start starts the secondary layer3 controller, handles all events and creates all needed logical entities -func (oc *SecondaryLayer3NetworkController) Start(_ context.Context) error { - klog.Infof("Start secondary %s network controller of network %s", oc.TopologyType(), oc.GetNetworkName()) +// Start starts the UDN layer3 controller, handles all events and creates all needed logical entities +func (oc *Layer3UserDefinedNetworkController) Start(_ context.Context) error { + klog.Infof("Start %s UDN controller for network %s", oc.TopologyType(), oc.GetNetworkName()) if err := oc.init(); err != nil { return err } @@ -459,8 +459,8 @@ func (oc *SecondaryLayer3NetworkController) Start(_ context.Context) error { } // Stop gracefully stops the controller, and delete all logical entities for this network if requested -func (oc *SecondaryLayer3NetworkController) Stop() { - klog.Infof("Stop secondary %s network controller of network %s", oc.TopologyType(), oc.GetNetworkName()) +func (oc 
*Layer3UserDefinedNetworkController) Stop() { + klog.Infof("Stop %s UDN controller of network %s", oc.TopologyType(), oc.GetNetworkName()) close(oc.stopChan) oc.cancelableCtx.Cancel() oc.wg.Wait() @@ -487,7 +487,7 @@ func (oc *SecondaryLayer3NetworkController) Stop() { // Cleanup cleans up logical entities for the given network, called from net-attach-def routine // could be called from a dummy Controller (only has CommonNetworkControllerInfo set) -func (oc *SecondaryLayer3NetworkController) Cleanup() error { +func (oc *Layer3UserDefinedNetworkController) Cleanup() error { // cleans up related OVN logical entities var ops []ovsdb.Operation var err error @@ -557,7 +557,7 @@ func (oc *SecondaryLayer3NetworkController) Cleanup() error { return nil } -func (oc *SecondaryLayer3NetworkController) run() error { +func (oc *Layer3UserDefinedNetworkController) run() error { klog.Infof("Starting all the Watchers for network %s ...", oc.GetNetworkName()) start := time.Now() @@ -628,7 +628,7 @@ func (oc *SecondaryLayer3NetworkController) run() error { return nil } -func (oc *SecondaryLayer3NetworkController) Reconcile(netInfo util.NetInfo) error { +func (oc *Layer3UserDefinedNetworkController) Reconcile(netInfo util.NetInfo) error { return oc.BaseNetworkController.reconcile( netInfo, func(node string) { @@ -640,7 +640,7 @@ func (oc *SecondaryLayer3NetworkController) Reconcile(netInfo util.NetInfo) erro // WatchNodes starts the watching of node resource and calls // back the appropriate handler logic -func (oc *SecondaryLayer3NetworkController) WatchNodes() error { +func (oc *Layer3UserDefinedNetworkController) WatchNodes() error { if oc.nodeHandler != nil { return nil } @@ -651,7 +651,7 @@ func (oc *SecondaryLayer3NetworkController) WatchNodes() error { return err } -func (oc *SecondaryLayer3NetworkController) init() error { +func (oc *Layer3UserDefinedNetworkController) init() error { if err := oc.gatherJoinSwitchIPs(); err != nil { return fmt.Errorf("failed to gather join 
switch IPs for network %s: %v", oc.GetNetworkName(), err) } @@ -699,7 +699,7 @@ func (oc *SecondaryLayer3NetworkController) init() error { return nil } -func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1.Node, nSyncs *nodeSyncs) error { +func (oc *Layer3UserDefinedNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, nSyncs *nodeSyncs) error { var hostSubnets []*net.IPNet var errs []error var err error @@ -835,7 +835,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 return err } -func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, syncZoneIc bool) error { +func (oc *Layer3UserDefinedNetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, syncZoneIc bool) error { _, present := oc.localZoneNodes.Load(node.Name) if present { @@ -870,7 +870,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *corev // If isUDNAdvertised is true, then we want to SNAT all packets that are coming from pods on this network // leaving towards nodeIPs on the cluster to masqueradeIP. 
If network is advertise then the SNAT looks like this: // "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" "169.254.0.36" "93.93.0.0/24" -func (oc *SecondaryLayer3NetworkController) addOrUpdateUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node, isUDNAdvertised bool) error { +func (oc *Layer3UserDefinedNetworkController) addOrUpdateUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node, isUDNAdvertised bool) error { outputPort := types.RouterToSwitchPrefix + oc.GetNetworkScopedName(node.Name) nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, isUDNAdvertised) if err != nil { @@ -890,13 +890,13 @@ func (oc *SecondaryLayer3NetworkController) addOrUpdateUDNNodeSubnetEgressSNAT(l return nil } -func (oc *SecondaryLayer3NetworkController) addNode(node *corev1.Node) ([]*net.IPNet, error) { - // Node subnet for the secondary layer3 network is allocated by cluster manager. +func (oc *Layer3UserDefinedNetworkController) addNode(node *corev1.Node) ([]*net.IPNet, error) { + // Node subnet for the layer3 UDN is allocated by cluster manager. // Make sure that the node is allocated with the subnet before proceeding // to create OVN Northbound resources. 
hostSubnets, err := util.ParseNodeHostSubnetAnnotation(node, oc.GetNetworkName()) if err != nil || len(hostSubnets) < 1 { - return nil, fmt.Errorf("subnet annotation in the node %q for the layer3 secondary network %s is missing : %w", node.Name, oc.GetNetworkName(), err) + return nil, fmt.Errorf("subnet annotation in the node %q for the layer3 UDN %s is missing : %w", node.Name, oc.GetNetworkName(), err) } err = oc.createNodeLogicalSwitch(node.Name, hostSubnets, oc.clusterLoadBalancerGroupUUID, oc.switchLoadBalancerGroupUUID) @@ -922,7 +922,7 @@ func (oc *SecondaryLayer3NetworkController) addNode(node *corev1.Node) ([]*net.I return hostSubnets, nil } -func (oc *SecondaryLayer3NetworkController) deleteNodeEvent(node *corev1.Node) error { +func (oc *Layer3UserDefinedNetworkController) deleteNodeEvent(node *corev1.Node) error { klog.V(5).Infof("Deleting Node %q for network %s. Removing the node from "+ "various caches", node.Name, oc.GetNetworkName()) @@ -950,7 +950,7 @@ func (oc *SecondaryLayer3NetworkController) deleteNodeEvent(node *corev1.Node) e return nil } -func (oc *SecondaryLayer3NetworkController) deleteNode(nodeName string) error { +func (oc *Layer3UserDefinedNetworkController) deleteNode(nodeName string) error { if err := oc.deleteNodeLogicalNetwork(nodeName); err != nil { return fmt.Errorf("error deleting node %s logical network: %v", nodeName, err) } @@ -962,7 +962,7 @@ func (oc *SecondaryLayer3NetworkController) deleteNode(nodeName string) error { // watchNodes() will be called for all existing nodes at startup anyway. // Note that this list will include the 'join' cluster switch, which we // do not want to delete. 
-func (oc *SecondaryLayer3NetworkController) syncNodes(nodes []interface{}) error { +func (oc *Layer3UserDefinedNetworkController) syncNodes(nodes []interface{}) error { foundNodes := sets.New[string]() for _, tmp := range nodes { node, ok := tmp.(*corev1.Node) @@ -1005,7 +1005,7 @@ func (oc *SecondaryLayer3NetworkController) syncNodes(nodes []interface{}) error return nil } -func (oc *SecondaryLayer3NetworkController) gatherJoinSwitchIPs() error { +func (oc *Layer3UserDefinedNetworkController) gatherJoinSwitchIPs() error { // Allocate IPs for logical router port prefixed with // `GwRouterToJoinSwitchPrefix` for the network managed by this controller. // This should always allocate the first IPs in the join switch subnets. @@ -1017,7 +1017,7 @@ func (oc *SecondaryLayer3NetworkController) gatherJoinSwitchIPs() error { return nil } -func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { +func (oc *Layer3UserDefinedNetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -1078,7 +1078,7 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) }, nil } -func (oc *SecondaryLayer3NetworkController) newClusterRouter() (*nbdb.LogicalRouter, error) { +func (oc *Layer3UserDefinedNetworkController) newClusterRouter() (*nbdb.LogicalRouter, error) { if oc.multicastSupport { return oc.gatewayTopologyFactory.NewClusterRouterWithMulticastSupport( oc.GetNetworkScopedClusterRouterName(), @@ -1093,7 +1093,7 @@ func (oc *SecondaryLayer3NetworkController) newClusterRouter() (*nbdb.LogicalRou ) } -func (oc *SecondaryLayer3NetworkController) newGatewayManager(nodeName string) *GatewayManager { +func (oc *Layer3UserDefinedNetworkController) newGatewayManager(nodeName string) 
*GatewayManager { return NewGatewayManager( nodeName, oc.defaultCOPPUUID, @@ -1105,7 +1105,7 @@ func (oc *SecondaryLayer3NetworkController) newGatewayManager(nodeName string) * ) } -func (oc *SecondaryLayer3NetworkController) gatewayOptions() []GatewayOption { +func (oc *Layer3UserDefinedNetworkController) gatewayOptions() []GatewayOption { var opts []GatewayOption if oc.clusterLoadBalancerGroupUUID != "" { opts = append(opts, WithLoadBalancerGroups( @@ -1117,7 +1117,7 @@ func (oc *SecondaryLayer3NetworkController) gatewayOptions() []GatewayOption { return opts } -func (oc *SecondaryLayer3NetworkController) gatewayManagerForNode(nodeName string) *GatewayManager { +func (oc *Layer3UserDefinedNetworkController) gatewayManagerForNode(nodeName string) *GatewayManager { obj, isFound := oc.gatewayManagers.Load(nodeName) if !isFound { return oc.newGatewayManager(nodeName) @@ -1135,7 +1135,7 @@ func (oc *SecondaryLayer3NetworkController) gatewayManagerForNode(nodeName strin } } -func (oc *SecondaryLayer3NetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { +func (oc *Layer3UserDefinedNetworkController) StartServiceController(wg *sync.WaitGroup, runRepair bool) error { useLBGroups := oc.clusterLoadBalancerGroupUUID != "" // use 5 workers like most of the kubernetes controllers in the kubernetes controller-manager // do not use LB templates for UDNs - OVN bug https://issues.redhat.com/browse/FDP-988 diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go b/go-controller/pkg/ovn/layer3_user_defined_network_controller_test.go similarity index 92% rename from go-controller/pkg/ovn/secondary_layer3_network_controller_test.go rename to go-controller/pkg/ovn/layer3_user_defined_network_controller_test.go index fe667417ec..f9cec964ae 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go +++ b/go-controller/pkg/ovn/layer3_user_defined_network_controller_test.go @@ -33,7 +33,7 @@ import ( . 
"github.com/onsi/gomega" ) -type secondaryNetInfo struct { +type userDefinedNetInfo struct { netName string nadName string clustersubnets string @@ -45,12 +45,12 @@ type secondaryNetInfo struct { } const ( - nadName = "blue-net" - ns = "namespace1" - secondaryNetworkName = "isolatednet" - secondaryNetworkID = "2" - denyPolicyName = "deny-all-policy" - denyPG = "deny-port-group" + nadName = "blue-net" + ns = "namespace1" + userDefinedNetworkName = "isolatednet" + userDefinedNetworkID = "2" + denyPolicyName = "deny-all-policy" + denyPG = "deny-port-group" ) type testConfiguration struct { @@ -92,7 +92,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { DescribeTable( "reconciles a new", - func(netInfo secondaryNetInfo, testConfig testConfiguration, gwMode config.GatewayMode) { + func(netInfo userDefinedNetInfo, testConfig testConfiguration, gwMode config.GatewayMode) { podInfo := dummyTestPod(ns, netInfo) if testConfig.configToOverride != nil { config.OVNKubernetesFeature = *testConfig.configToOverride @@ -113,7 +113,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { *netInfo.netconf(), ) Expect(err).NotTo(HaveOccurred()) - nad.Annotations = map[string]string{types.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{types.OvnNetworkIDAnnotation: userDefinedNetworkID} Expect(netInfo.setupOVNDependencies(&initialDB)).To(Succeed()) n := newNamespace(ns) if netInfo.isPrimary { @@ -139,7 +139,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { } const nodeIPv4CIDR = "192.168.126.202/24" - testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR, netInfo) + testNode, err := newNodeWithUserDefinedNetworks(nodeName, nodeIPv4CIDR, netInfo) Expect(err).NotTo(HaveOccurred()) networkPolicy := getMatchLabelsNetworkPolicy(denyPolicyName, ns, "", "", false, false) fakeOvn.startWithDBSetup( @@ -180,16 +180,16 @@ var _ = Describe("OVN Multi-Homed 
pod operations for layer 3 network", func() { if netInfo.isPrimary { Expect(fakeOvn.controller.WatchNetworkPolicy()).NotTo(HaveOccurred()) } - secondaryNetController, ok := fakeOvn.secondaryControllers[secondaryNetworkName] + userDefinedNetController, ok := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] Expect(ok).To(BeTrue()) - secondaryNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() - podInfo.populateSecondaryNetworkLogicalSwitchCache(secondaryNetController) - Expect(secondaryNetController.bnc.WatchNodes()).To(Succeed()) - Expect(secondaryNetController.bnc.WatchPods()).To(Succeed()) + userDefinedNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() + podInfo.populateUserDefinedNetworkLogicalSwitchCache(userDefinedNetController) + Expect(userDefinedNetController.bnc.WatchNodes()).To(Succeed()) + Expect(userDefinedNetController.bnc.WatchPods()).To(Succeed()) if netInfo.isPrimary { - Expect(secondaryNetController.bnc.WatchNetworkPolicy()).To(Succeed()) + Expect(userDefinedNetController.bnc.WatchNetworkPolicy()).To(Succeed()) ninfo, err := fakeOvn.networkManager.Interface().GetActiveNetworkForNamespace(ns) Expect(err).NotTo(HaveOccurred()) Expect(ninfo.GetNetworkName()).To(Equal(netInfo.netName)) @@ -211,24 +211,24 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { if testConfig.configToOverride != nil && testConfig.configToOverride.EnableEgressFirewall { defaultNetExpectations = append(defaultNetExpectations, buildNamespacedPortGroup(podInfo.namespace, DefaultNetworkControllerName)) - secNetPG := buildNamespacedPortGroup(podInfo.namespace, secondaryNetController.bnc.controllerName) - portName := util.GetSecondaryNetworkLogicalPortName(podInfo.namespace, podInfo.podName, netInfo.nadName) + "-UUID" + secNetPG := buildNamespacedPortGroup(podInfo.namespace, userDefinedNetController.bnc.controllerName) + portName := util.GetUserDefinedNetworkLogicalPortName(podInfo.namespace, podInfo.podName, 
netInfo.nadName) + "-UUID" secNetPG.Ports = []string{portName} defaultNetExpectations = append(defaultNetExpectations, secNetPG) } networkConfig, err := util.NewNetInfo(netInfo.netconf()) Expect(err).NotTo(HaveOccurred()) // Add NetPol hairpin ACLs and PGs for the validation. - mgmtPortName := managementPortName(secondaryNetController.bnc.GetNetworkScopedName(nodeName)) + mgmtPortName := managementPortName(userDefinedNetController.bnc.GetNetworkScopedName(nodeName)) mgmtPortUUID := mgmtPortName + "-UUID" defaultNetExpectations = append(defaultNetExpectations, getHairpinningACLsV4AndPortGroup()...) defaultNetExpectations = append(defaultNetExpectations, getHairpinningACLsV4AndPortGroupForNetwork(networkConfig, []string{mgmtPortUUID})...) // Add Netpol deny policy ACLs and PGs for the validation. - podLPortName := util.GetSecondaryNetworkLogicalPortName(podInfo.namespace, podInfo.podName, netInfo.nadName) + "-UUID" + podLPortName := util.GetUserDefinedNetworkLogicalPortName(podInfo.namespace, podInfo.podName, netInfo.nadName) + "-UUID" dataParams := newNetpolDataParams(networkPolicy).withLocalPortUUIDs(podLPortName).withNetInfo(networkConfig) defaultDenyExpectedData := getDefaultDenyData(dataParams) - pgDbIDs := getNetworkPolicyPortGroupDbIDs(ns, secondaryNetController.bnc.controllerName, denyPolicyName) + pgDbIDs := getNetworkPolicyPortGroupDbIDs(ns, userDefinedNetController.bnc.controllerName, denyPolicyName) ingressPG := libovsdbutil.BuildPortGroup(pgDbIDs, nil, nil) ingressPG.UUID = denyPG ingressPG.Ports = []string{podLPortName} @@ -239,7 +239,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { libovsdbtest.HaveData( append( defaultNetExpectations, - newSecondaryNetworkExpectationMachine( + newUserDefinedNetworkExpectationMachine( fakeOvn, []testPod{podInfo}, expectationOptions..., @@ -293,7 +293,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { DescribeTable( "the gateway is properly cleaned 
up", - func(netInfo secondaryNetInfo, testConfig testConfiguration) { + func(netInfo userDefinedNetInfo, testConfig testConfiguration) { config.OVNKubernetesFeature.EnableMultiNetwork = true config.OVNKubernetesFeature.EnableNetworkSegmentation = true podInfo := dummyTestPod(ns, netInfo) @@ -314,7 +314,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { *netConf, ) Expect(err).NotTo(HaveOccurred()) - nad.Annotations = map[string]string{types.OvnNetworkIDAnnotation: secondaryNetworkID} + nad.Annotations = map[string]string{types.OvnNetworkIDAnnotation: userDefinedNetworkID} mutableNetworkConfig := util.NewMutableNetInfo(networkConfig) mutableNetworkConfig.SetNADs(util.GetNADName(nad.Namespace, nad.Name)) @@ -326,7 +326,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { fakeNetworkManager.PrimaryNetworks[ns] = networkConfig const nodeIPv4CIDR = "192.168.126.202/24" - testNode, err := newNodeWithSecondaryNets(nodeName, nodeIPv4CIDR, netInfo) + testNode, err := newNodeWithUserDefinedNetworks(nodeName, nodeIPv4CIDR, netInfo) Expect(err).NotTo(HaveOccurred()) nbZone := &nbdb.NBGlobal{Name: types.OvnDefaultZone, UUID: types.OvnDefaultZone} @@ -390,16 +390,16 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { Expect(fakeOvn.controller.WatchNamespaces()).To(Succeed()) Expect(fakeOvn.controller.WatchPods()).To(Succeed()) - secondaryNetController, ok := fakeOvn.secondaryControllers[secondaryNetworkName] + userDefinedNetController, ok := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] Expect(ok).To(BeTrue()) - secondaryNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() - podInfo.populateSecondaryNetworkLogicalSwitchCache(secondaryNetController) - Expect(secondaryNetController.bnc.WatchNodes()).To(Succeed()) - Expect(secondaryNetController.bnc.WatchPods()).To(Succeed()) + userDefinedNetController.bnc.ovnClusterLRPToJoinIfAddrs = dummyJoinIPs() + 
podInfo.populateUserDefinedNetworkLogicalSwitchCache(userDefinedNetController) + Expect(userDefinedNetController.bnc.WatchNodes()).To(Succeed()) + Expect(userDefinedNetController.bnc.WatchPods()).To(Succeed()) if netInfo.isPrimary { - Expect(secondaryNetController.bnc.WatchNetworkPolicy()).To(Succeed()) + Expect(userDefinedNetController.bnc.WatchNetworkPolicy()).To(Succeed()) } Expect(fakeOvn.fakeClient.KubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{})).To(Succeed()) @@ -407,8 +407,8 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { // we must access the layer3 controller to be able to issue its cleanup function (to remove the GW related stuff). Expect( - newSecondaryLayer3NetworkController( - &secondaryNetController.bnc.CommonNetworkControllerInfo, + newLayer3UserDefinedNetworkController( + &userDefinedNetController.bnc.CommonNetworkControllerInfo, networkConfig, nodeName, fakeNetworkManager, @@ -446,7 +446,7 @@ var _ = Describe("OVN Multi-Homed pod operations for layer 3 network", func() { func newPodWithPrimaryUDN( nodeName, nodeSubnet, nodeMgtIP, nodeGWIP, podName, podIPs, podMAC, namespace string, - primaryUDNConfig secondaryNetInfo, + primaryUDNConfig userDefinedNetInfo, ) testPod { pod := newTPod(nodeName, nodeSubnet, nodeMgtIP, "", podName, podIPs, podMAC, namespace) if primaryUDNConfig.isPrimary { @@ -493,7 +493,7 @@ func newPodWithPrimaryUDN( func namespacedName(ns, name string) string { return fmt.Sprintf("%s/%s", ns, name) } -func (sni *secondaryNetInfo) getNetworkRole() string { +func (sni *userDefinedNetInfo) getNetworkRole() string { return util.GetUserDefinedNetworkRole(sni.isPrimary) } @@ -501,7 +501,7 @@ func getNetworkRole(netInfo util.NetInfo) string { return util.GetUserDefinedNetworkRole(netInfo.IsPrimaryNetwork()) } -func (sni *secondaryNetInfo) setupOVNDependencies(dbData *libovsdbtest.TestSetup) error { +func (sni *userDefinedNetInfo) 
setupOVNDependencies(dbData *libovsdbtest.TestSetup) error { netInfo, err := util.NewNetInfo(sni.netconf()) if err != nil { return err @@ -536,7 +536,7 @@ func (sni *secondaryNetInfo) setupOVNDependencies(dbData *libovsdbtest.TestSetup return nil } -func (sni *secondaryNetInfo) netconf() *ovncnitypes.NetConf { +func (sni *userDefinedNetInfo) netconf() *ovncnitypes.NetConf { const plugin = "ovn-k8s-cni-overlay" role := types.NetworkRoleSecondary @@ -556,7 +556,7 @@ func (sni *secondaryNetInfo) netconf() *ovncnitypes.NetConf { } } -func dummyTestPod(nsName string, info secondaryNetInfo) testPod { +func dummyTestPod(nsName string, info userDefinedNetInfo) testPod { const nodeSubnet = "10.128.1.0/24" if info.isPrimary { return newPodWithPrimaryUDN( @@ -592,9 +592,9 @@ func dummyTestPod(nsName string, info secondaryNetInfo) testPod { return pod } -func dummySecondaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) secondaryNetInfo { - return secondaryNetInfo{ - netName: secondaryNetworkName, +func dummySecondaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) userDefinedNetInfo { + return userDefinedNetInfo{ + netName: userDefinedNetworkName, nadName: namespacedName(ns, nadName), topology: types.Layer3Topology, clustersubnets: clustersubnets, @@ -602,18 +602,18 @@ func dummySecondaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) } } -func dummyPrimaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) secondaryNetInfo { +func dummyPrimaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets string) userDefinedNetInfo { secondaryNet := dummySecondaryLayer3UserDefinedNetwork(clustersubnets, hostsubnets) secondaryNet.isPrimary = true return secondaryNet } // This util is returning a network-name/hostSubnet for the node's node-subnets annotation -func (sni *secondaryNetInfo) String() string { +func (sni *userDefinedNetInfo) String() string { return fmt.Sprintf("%q: %q", sni.netName, sni.hostsubnets) } -func 
newNodeWithSecondaryNets(nodeName string, nodeIPv4CIDR string, netInfos ...secondaryNetInfo) (*corev1.Node, error) { +func newNodeWithUserDefinedNetworks(nodeName string, nodeIPv4CIDR string, netInfos ...userDefinedNetInfo) (*corev1.Node, error) { var nodeSubnetInfo []string for _, info := range netInfos { nodeSubnetInfo = append(nodeSubnetInfo, info.String()) @@ -641,7 +641,7 @@ func newNodeWithSecondaryNets(nodeName string, nodeIPv4CIDR string, netInfos ... "k8s.ovn.org/zone-name": "global", "k8s.ovn.org/l3-gateway-config": fmt.Sprintf("{\"default\":{\"mode\":\"shared\",\"bridge-id\":\"breth0\",\"interface-id\":\"breth0_ovn-worker\",\"mac-address\":%q,\"ip-addresses\":[%[2]q],\"ip-address\":%[2]q,\"next-hops\":[%[3]q],\"next-hop\":%[3]q,\"node-port-enable\":\"true\",\"vlan-id\":\"0\"}}", util.IPAddrToHWAddr(nodeIP), nodeCIDR, nextHopIP), util.OvnNodeChassisID: "abdcef", - "k8s.ovn.org/network-ids": fmt.Sprintf("{\"default\":\"0\",\"isolatednet\":\"%s\"}", secondaryNetworkID), + "k8s.ovn.org/network-ids": fmt.Sprintf("{\"default\":\"0\",\"isolatednet\":\"%s\"}", userDefinedNetworkID), util.OvnNodeID: "4", "k8s.ovn.org/udn-layer2-node-gateway-router-lrp-tunnel-ids": "{\"isolatednet\":\"25\"}", }, @@ -1043,15 +1043,15 @@ func standardNonDefaultNetworkExtIDsForLogicalSwitch(netInfo util.NetInfo) map[s return externalIDs } -func newSecondaryLayer3NetworkController( +func newLayer3UserDefinedNetworkController( cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, nodeName string, networkManager networkmanager.Interface, eIPController *EgressIPController, portCache *PortCache, -) *SecondaryLayer3NetworkController { - layer3NetworkController, err := NewSecondaryLayer3NetworkController(cnci, netInfo, networkManager, nil, eIPController, portCache) +) *Layer3UserDefinedNetworkController { + layer3NetworkController, err := NewLayer3UserDefinedNetworkController(cnci, netInfo, networkManager, nil, eIPController, portCache) Expect(err).NotTo(HaveOccurred()) 
layer3NetworkController.gatewayManagers.Store( nodeName, diff --git a/go-controller/pkg/ovn/secondary_localnet_network_controller.go b/go-controller/pkg/ovn/localnet_user_defined_network_controller.go similarity index 72% rename from go-controller/pkg/ovn/secondary_localnet_network_controller.go rename to go-controller/pkg/ovn/localnet_user_defined_network_controller.go index 24cb69f631..5904c689b9 100644 --- a/go-controller/pkg/ovn/secondary_localnet_network_controller.go +++ b/go-controller/pkg/ovn/localnet_user_defined_network_controller.go @@ -27,15 +27,15 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -type secondaryLocalnetNetworkControllerEventHandler struct { +type LocalnetUserDefinedNetworkControllerEventHandler struct { baseHandler baseNetworkControllerEventHandler watchFactory *factory.WatchFactory objType reflect.Type - oc *SecondaryLocalnetNetworkController + oc *LocalnetUserDefinedNetworkController syncFunc func([]interface{}) error } -func (h *secondaryLocalnetNetworkControllerEventHandler) FilterOutResource(obj interface{}) bool { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) FilterOutResource(obj interface{}) bool { return h.oc.FilterOutResource(h.objType, obj) } @@ -43,24 +43,24 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) FilterOutResource(obj i // type considers them equal and therefore no update is needed. It returns false when the two objects are not considered // equal and an update needs be executed. This is regardless of how the update is carried out (whether with a dedicated update // function or with a delete on the old obj followed by an add on the new obj). 
-func (h *secondaryLocalnetNetworkControllerEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, error) { return h.baseHandler.areResourcesEqual(h.objType, obj1, obj2) } // GetInternalCacheEntry returns the internal cache entry for this object, given an object and its type. // This is now used only for pods, which will get their the logical port cache entry. -func (h *secondaryLocalnetNetworkControllerEventHandler) GetInternalCacheEntry(obj interface{}) interface{} { - return h.oc.GetInternalCacheEntryForSecondaryNetwork(h.objType, obj) +func (h *LocalnetUserDefinedNetworkControllerEventHandler) GetInternalCacheEntry(obj interface{}) interface{} { + return h.oc.GetInternalCacheEntryForUserDefinedNetwork(h.objType, obj) } // GetResourceFromInformerCache returns the latest state of the object, given an object key and its type. // from the informers cache. -func (h *secondaryLocalnetNetworkControllerEventHandler) GetResourceFromInformerCache(key string) (interface{}, error) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) GetResourceFromInformerCache(key string) (interface{}, error) { return h.baseHandler.getResourceFromInformerCache(h.objType, h.watchFactory, key) } // RecordAddEvent records the add event on this given object. -func (h *secondaryLocalnetNetworkControllerEventHandler) RecordAddEvent(obj interface{}) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) RecordAddEvent(obj interface{}) { switch h.objType { case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) @@ -70,34 +70,34 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) RecordAddEvent(obj inte } // RecordUpdateEvent records the udpate event on this given object. 
-func (h *secondaryLocalnetNetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) RecordUpdateEvent(obj interface{}) { h.baseHandler.recordAddEvent(h.objType, obj) } // RecordDeleteEvent records the delete event on this given object. -func (h *secondaryLocalnetNetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) RecordDeleteEvent(obj interface{}) { h.baseHandler.recordAddEvent(h.objType, obj) } // RecordSuccessEvent records the success event on this given object. -func (h *secondaryLocalnetNetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) RecordSuccessEvent(obj interface{}) { h.baseHandler.recordAddEvent(h.objType, obj) } // RecordErrorEvent records the error event on this given object. -func (h *secondaryLocalnetNetworkControllerEventHandler) RecordErrorEvent(_ interface{}, _ string, _ error) { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) RecordErrorEvent(_ interface{}, _ string, _ error) { } // IsResourceScheduled returns true if the given object has been scheduled. // Only applied to pods for now. Returns true for all other types. -func (h *secondaryLocalnetNetworkControllerEventHandler) IsResourceScheduled(obj interface{}) bool { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) IsResourceScheduled(obj interface{}) bool { return h.baseHandler.isResourceScheduled(h.objType, obj) } // AddResource adds the specified object to the cluster according to its type and returns the error, // if any, yielded during object creation. 
// Given an object to add and a boolean specifying if the function was executed from iterateRetryResources -func (h *secondaryLocalnetNetworkControllerEventHandler) AddResource(obj interface{}, _ bool) error { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) AddResource(obj interface{}, _ bool) error { switch h.objType { case factory.NodeType: node, ok := obj.(*corev1.Node) @@ -106,7 +106,7 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) AddResource(obj interfa } return h.oc.addUpdateNodeEvent(node) default: - return h.oc.AddSecondaryNetworkResourceCommon(h.objType, obj) + return h.oc.AddUserDefinedNetworkResourceCommon(h.objType, obj) } } @@ -114,7 +114,7 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) AddResource(obj interfa // type and returns the error, if any, yielded during the object update. // Given an old and a new object; The inRetryCache boolean argument is to indicate if the given resource // is in the retryCache or not. -func (h *secondaryLocalnetNetworkControllerEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache bool) error { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) UpdateResource(oldObj, newObj interface{}, inRetryCache bool) error { switch h.objType { case factory.NodeType: node, ok := newObj.(*corev1.Node) @@ -123,14 +123,14 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) UpdateResource(oldObj, } return h.oc.addUpdateNodeEvent(node) default: - return h.oc.UpdateSecondaryNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) + return h.oc.UpdateUserDefinedNetworkResourceCommon(h.objType, oldObj, newObj, inRetryCache) } } // DeleteResource deletes the object from the cluster according to the delete logic of its resource type. // Given an object and optionally a cachedObj; cachedObj is the internal cache entry for this object, // used for now for pods and network policies. 
-func (h *secondaryLocalnetNetworkControllerEventHandler) DeleteResource(obj, cachedObj interface{}) error { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) DeleteResource(obj, cachedObj interface{}) error { switch h.objType { case factory.NodeType: node, ok := obj.(*corev1.Node) @@ -139,11 +139,11 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) DeleteResource(obj, cac } return h.oc.deleteNodeEvent(node) default: - return h.oc.DeleteSecondaryNetworkResourceCommon(h.objType, obj, cachedObj) + return h.oc.DeleteUserDefinedNetworkResourceCommon(h.objType, obj, cachedObj) } } -func (h *secondaryLocalnetNetworkControllerEventHandler) SyncFunc(objs []interface{}) error { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) SyncFunc(objs []interface{}) error { var syncFunc func([]interface{}) error if h.syncFunc != nil { @@ -155,7 +155,7 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) SyncFunc(objs []interfa syncFunc = h.oc.syncNodes case factory.PodType: - syncFunc = h.oc.syncPodsForSecondaryNetwork + syncFunc = h.oc.syncPodsForUserDefinedNetwork case factory.NamespaceType: syncFunc = h.oc.syncNamespaces @@ -178,30 +178,30 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) SyncFunc(objs []interfa // IsObjectInTerminalState returns true if the given object is a in terminal state. // This is used now for pods that are either in a PodSucceeded or in a PodFailed state. 
-func (h *secondaryLocalnetNetworkControllerEventHandler) IsObjectInTerminalState(obj interface{}) bool { +func (h *LocalnetUserDefinedNetworkControllerEventHandler) IsObjectInTerminalState(obj interface{}) bool { return h.baseHandler.isObjectInTerminalState(h.objType, obj) } -// SecondaryLocalnetNetworkController is created for logical network infrastructure and policy -// for a secondary localnet network -type SecondaryLocalnetNetworkController struct { - BaseSecondaryLayer2NetworkController +// LocalnetUserDefinedNetworkController is created for logical network infrastructure and policy +// for a localnet user-defined network +type LocalnetUserDefinedNetworkController struct { + BaseLayer2UserDefinedNetworkController } -// NewSecondaryLocalnetNetworkController create a new OVN controller for the given secondary localnet NAD -func NewSecondaryLocalnetNetworkController( +// NewLocalnetUserDefinedNetworkController create a new OVN controller for the given localnet NAD +func NewLocalnetUserDefinedNetworkController( cnci *CommonNetworkControllerInfo, netInfo util.NetInfo, networkManager networkmanager.Interface, -) *SecondaryLocalnetNetworkController { +) *LocalnetUserDefinedNetworkController { stopChan := make(chan struct{}) ipv4Mode, ipv6Mode := netInfo.IPMode() addressSetFactory := addressset.NewOvnAddressSetFactory(cnci.nbClient, ipv4Mode, ipv6Mode) - oc := &SecondaryLocalnetNetworkController{ - BaseSecondaryLayer2NetworkController{ - BaseSecondaryNetworkController: BaseSecondaryNetworkController{ + oc := &LocalnetUserDefinedNetworkController{ + BaseLayer2UserDefinedNetworkController{ + BaseUserDefinedNetworkController: BaseUserDefinedNetworkController{ BaseNetworkController: BaseNetworkController{ CommonNetworkControllerInfo: *cnci, controllerName: getNetworkControllerName(netInfo.GetNetworkName()), @@ -242,21 +242,21 @@ func NewSecondaryLocalnetNetworkController( claimsReconciler) } - // disable multicast support for secondary networks - // TBD: changes needs 
to be made to support multicast in secondary networks + // disable multicast support for UDNs + // TBD: changes need to be made to support multicast in UDNs oc.multicastSupport = false oc.initRetryFramework() return oc } -// Start starts the secondary localnet controller, handles all events and creates all needed logical entities -func (oc *SecondaryLocalnetNetworkController) Start(_ context.Context) error { - klog.Infof("Starting controller for secondary network network %s", oc.GetNetworkName()) +// Start starts the localnet UDN controller, handles all events and creates all needed logical entities +func (oc *LocalnetUserDefinedNetworkController) Start(_ context.Context) error { + klog.Infof("Starting controller for UDN %s", oc.GetNetworkName()) start := time.Now() defer func() { - klog.Infof("Starting controller for secondary network network %s took %v", oc.GetNetworkName(), time.Since(start)) + klog.Infof("Starting controller for UDN %s took %v", oc.GetNetworkName(), time.Since(start)) }() if err := oc.init(); err != nil { @@ -266,17 +266,17 @@ func (oc *SecondaryLocalnetNetworkController) Start(_ context.Context) error { return oc.run() } -func (oc *SecondaryLocalnetNetworkController) run() error { - return oc.BaseSecondaryLayer2NetworkController.run() +func (oc *LocalnetUserDefinedNetworkController) run() error { + return oc.BaseLayer2UserDefinedNetworkController.run() } // Cleanup cleans up logical entities for the given network, called from net-attach-def routine // could be called from a dummy Controller (only has CommonNetworkControllerInfo set) -func (oc *SecondaryLocalnetNetworkController) Cleanup() error { - return oc.BaseSecondaryLayer2NetworkController.cleanup() +func (oc *LocalnetUserDefinedNetworkController) Cleanup() error { + return oc.BaseLayer2UserDefinedNetworkController.cleanup() } -func (oc *SecondaryLocalnetNetworkController) init() error { +func (oc *LocalnetUserDefinedNetworkController) init() error { switchName =
oc.GetNetworkScopedSwitchName(types.OVNLocalnetSwitch) logicalSwitch, err := oc.initializeLogicalSwitch(switchName, oc.Subnets(), oc.ExcludeSubnets(), oc.ReservedSubnets(), "", "") @@ -307,19 +307,19 @@ func (oc *SecondaryLocalnetNetworkController) init() error { return nil } -func (oc *SecondaryLocalnetNetworkController) Stop() { - klog.Infof("Stoping controller for secondary network %s", oc.GetNetworkName()) - oc.BaseSecondaryLayer2NetworkController.stop() +func (oc *LocalnetUserDefinedNetworkController) Stop() { + klog.Infof("Stopping controller for UDN %s", oc.GetNetworkName()) + oc.BaseLayer2UserDefinedNetworkController.stop() } -func (oc *SecondaryLocalnetNetworkController) Reconcile(netInfo util.NetInfo) error { +func (oc *LocalnetUserDefinedNetworkController) Reconcile(netInfo util.NetInfo) error { return oc.BaseNetworkController.reconcile( netInfo, func(_ string) {}, ) } -func (oc *SecondaryLocalnetNetworkController) initRetryFramework() { +func (oc *LocalnetUserDefinedNetworkController) initRetryFramework() { oc.retryNodes = oc.newRetryFramework(factory.NodeType) oc.retryPods = oc.newRetryFramework(factory.PodType) if oc.allocatesPodAnnotation() && oc.AllowsPersistentIPs() { @@ -336,9 +336,9 @@ func (oc *SecondaryLocalnetNetworkController) initRetryFramework() { } // newRetryFramework builds and returns a retry framework for the input resource type; -func (oc *SecondaryLocalnetNetworkController) newRetryFramework( +func (oc *LocalnetUserDefinedNetworkController) newRetryFramework( objectType reflect.Type) *retry.RetryFramework { - eventHandler := &secondaryLocalnetNetworkControllerEventHandler{ + eventHandler := &LocalnetUserDefinedNetworkControllerEventHandler{ baseHandler: baseNetworkControllerEventHandler{}, objType: objectType, watchFactory: oc.watchFactory, @@ -359,7 +359,7 @@ func (oc *SecondaryLocalnetNetworkController) newRetryFramework( ) } -func (oc *SecondaryLocalnetNetworkController) localnetPortNetworkNameOptions() map[string]string { +func 
(oc *LocalnetUserDefinedNetworkController) localnetPortNetworkNameOptions() map[string]string { localnetLSPOptions := map[string]string{ "network_name": oc.GetNetworkName(), } diff --git a/go-controller/pkg/ovn/multicast_test.go b/go-controller/pkg/ovn/multicast_test.go index 11e319214c..d40cc618e4 100644 --- a/go-controller/pkg/ovn/multicast_test.go +++ b/go-controller/pkg/ovn/multicast_test.go @@ -320,8 +320,8 @@ func startBaseNetworkController(fakeOvn *FakeOVN, nad *nadapi.NetworkAttachmentD if nad != nil { netInfo, err := util.ParseNADInfo(nad) Expect(err).ToNot(HaveOccurred()) - Expect(fakeOvn.NewSecondaryNetworkController(nad)).To(Succeed()) - controller, ok := fakeOvn.secondaryControllers[netInfo.GetNetworkName()] + Expect(fakeOvn.NewUserDefinedNetworkController(nad)).To(Succeed()) + controller, ok := fakeOvn.userDefinedNetworkControllers[netInfo.GetNetworkName()] Expect(ok).To(BeTrue()) return &controller.bnc.BaseNetworkController, controller.asf } else { diff --git a/go-controller/pkg/ovn/multihoming_test.go b/go-controller/pkg/ovn/multihoming_test.go index bfcdcd1a75..cd4f07137a 100644 --- a/go-controller/pkg/ovn/multihoming_test.go +++ b/go-controller/pkg/ovn/multihoming_test.go @@ -29,9 +29,9 @@ func (p testPod) addNetwork( tunnelID int, routes []util.PodRoute, ) { - podInfo, ok := p.secondaryPodInfos[netName] + podInfo, ok := p.udnPodInfos[netName] if !ok { - podInfo = &secondaryPodInfo{ + podInfo = &udnPodInfo{ nodeSubnet: nodeSubnet, nodeMgtIP: nodeMgtIP, nodeGWIP: nodeGWIP, @@ -39,12 +39,12 @@ func (p testPod) addNetwork( routes: routes, allportInfo: map[string]portInfo{}, } - p.secondaryPodInfos[netName] = podInfo + p.udnPodInfos[netName] = podInfo } prefixLen, ip := splitPodIPMaskLength(podIP) - portName := util.GetSecondaryNetworkLogicalPortName(p.namespace, p.podName, nadName) + portName := util.GetUserDefinedNetworkLogicalPortName(p.namespace, p.podName, nadName) podInfo.allportInfo[nadName] = portInfo{ portUUID: portName + "-UUID", podIP: ip, 
@@ -56,7 +56,7 @@ func (p testPod) addNetwork( } func (p testPod) getNetworkPortInfo(netName, nadName string) *portInfo { - podInfo, ok := p.secondaryPodInfos[netName] + podInfo, ok := p.udnPodInfos[netName] if !ok { return nil } @@ -78,9 +78,9 @@ func splitPodIPMaskLength(podIP string) (int, string) { return prefixLen, ip.String() } -type option func(machine *secondaryNetworkExpectationMachine) +type option func(machine *userDefinedNetworkExpectationMachine) -type secondaryNetworkExpectationMachine struct { +type userDefinedNetworkExpectationMachine struct { fakeOvn *FakeOVN pods []testPod gatewayConfig *util.L3GatewayConfig @@ -88,8 +88,8 @@ type secondaryNetworkExpectationMachine struct { hasClusterPortGroup bool } -func newSecondaryNetworkExpectationMachine(fakeOvn *FakeOVN, pods []testPod, opts ...option) *secondaryNetworkExpectationMachine { - machine := &secondaryNetworkExpectationMachine{ +func newUserDefinedNetworkExpectationMachine(fakeOvn *FakeOVN, pods []testPod, opts ...option) *userDefinedNetworkExpectationMachine { + machine := &userDefinedNetworkExpectationMachine{ fakeOvn: fakeOvn, pods: pods, } @@ -101,37 +101,37 @@ func newSecondaryNetworkExpectationMachine(fakeOvn *FakeOVN, pods []testPod, opt } func withGatewayConfig(config *util.L3GatewayConfig) option { - return func(machine *secondaryNetworkExpectationMachine) { + return func(machine *userDefinedNetworkExpectationMachine) { machine.gatewayConfig = config } } func withInterconnectCluster() option { - return func(machine *secondaryNetworkExpectationMachine) { + return func(machine *userDefinedNetworkExpectationMachine) { machine.isInterconnectCluster = true } } func withClusterPortGroup() option { - return func(machine *secondaryNetworkExpectationMachine) { + return func(machine *userDefinedNetworkExpectationMachine) { machine.hasClusterPortGroup = true } } -func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPorts(isPrimary bool) []libovsdbtest.TestData { +func (em 
*userDefinedNetworkExpectationMachine) expectedLogicalSwitchesAndPorts(isPrimary bool) []libovsdbtest.TestData { return em.expectedLogicalSwitchesAndPortsWithLspEnabled(isPrimary, nil) } -func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWithLspEnabled(isPrimary bool, expectedPodLspEnabled map[string]*bool) []libovsdbtest.TestData { +func (em *userDefinedNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWithLspEnabled(isPrimary bool, expectedPodLspEnabled map[string]*bool) []libovsdbtest.TestData { data := []libovsdbtest.TestData{} - for _, ocInfo := range em.fakeOvn.secondaryControllers { + for _, ocInfo := range em.fakeOvn.userDefinedNetworkControllers { nodeslsps := make(map[string][]string) acls := make(map[string][]string) var switchName string switchNodeMap := make(map[string]*nbdb.LogicalSwitch) alreadyAddedManagementElements := make(map[string]struct{}) for _, pod := range em.pods { - podInfo, ok := pod.secondaryPodInfos[ocInfo.bnc.GetNetworkName()] + podInfo, ok := pod.udnPodInfos[ocInfo.bnc.GetNetworkName()] if !ok { continue } @@ -252,7 +252,7 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit } } - // TODO: once we start the "full" SecondaryLayer2NetworkController (instead of just Base) + // TODO: once we start the "full" Layer2UserDefinedNetworkController (instead of just Base) // we can drop this, and compare all objects created by the controller (right now we're // missing all the meters, and the COPP) if ocInfo.bnc.TopologyType() == ovntypes.Layer2Topology { @@ -453,7 +453,7 @@ func nonICClusterTestConfiguration(opts ...testConfigOpt) testConfiguration { return config } -func newMultiHomedKubevirtPod(vmName string, liveMigrationInfo liveMigrationPodInfo, testPod testPod, multiHomingConfigs ...secondaryNetInfo) *corev1.Pod { +func newMultiHomedKubevirtPod(vmName string, liveMigrationInfo liveMigrationPodInfo, testPod testPod, multiHomingConfigs ...userDefinedNetInfo) *corev1.Pod { 
pod := newMultiHomedPod(testPod, multiHomingConfigs...) pod.Labels[kubevirtv1.VirtualMachineNameLabel] = vmName pod.Status.Phase = liveMigrationInfo.podPhase @@ -464,7 +464,7 @@ func newMultiHomedKubevirtPod(vmName string, liveMigrationInfo liveMigrationPodI return pod } -func newMultiHomedPod(testPod testPod, multiHomingConfigs ...secondaryNetInfo) *corev1.Pod { +func newMultiHomedPod(testPod testPod, multiHomingConfigs ...userDefinedNetInfo) *corev1.Pod { pod := newPod(testPod.namespace, testPod.podName, testPod.nodeName, testPod.podIP) var secondaryNetworks []nadapi.NetworkSelectionElement if len(pod.Annotations) == 0 { @@ -494,7 +494,7 @@ func newMultiHomedPod(testPod testPod, multiHomingConfigs ...secondaryNetInfo) * serializedNetworkSelectionElements, _ := json.Marshal(secondaryNetworks) pod.Annotations[nadapi.NetworkAttachmentAnnot] = string(serializedNetworkSelectionElements) if config.OVNKubernetesFeature.EnableInterconnect { - dummyOVNNetAnnotations := dummyOVNPodNetworkAnnotations(testPod.secondaryPodInfos, multiHomingConfigs) + dummyOVNNetAnnotations := dummyOVNPodNetworkAnnotations(testPod.udnPodInfos, multiHomingConfigs) if dummyOVNNetAnnotations != "{}" { pod.Annotations["k8s.ovn.org/pod-networks"] = dummyOVNNetAnnotations } @@ -502,7 +502,7 @@ func newMultiHomedPod(testPod testPod, multiHomingConfigs ...secondaryNetInfo) * return pod } -func dummyOVNPodNetworkAnnotations(secondaryPodInfos map[string]*secondaryPodInfo, multiHomingConfigs []secondaryNetInfo) string { +func dummyOVNPodNetworkAnnotations(secondaryPodInfos map[string]*udnPodInfo, multiHomingConfigs []userDefinedNetInfo) string { var ovnPodNetworksAnnotations []byte podAnnotations := map[string]podAnnotation{} for i, netConfig := range multiHomingConfigs { @@ -523,7 +523,7 @@ func dummyOVNPodNetworkAnnotations(secondaryPodInfos map[string]*secondaryPodInf return string(ovnPodNetworksAnnotations) } -func dummyOVNPodNetworkAnnotationForNetwork(portInfo portInfo, netConfig secondaryNetInfo, 
tunnelID int) podAnnotation { +func dummyOVNPodNetworkAnnotationForNetwork(portInfo portInfo, netConfig userDefinedNetInfo, tunnelID int) podAnnotation { role := ovntypes.NetworkRoleSecondary if netConfig.isPrimary { role = ovntypes.NetworkRolePrimary diff --git a/go-controller/pkg/ovn/multipolicy_test.go b/go-controller/pkg/ovn/multipolicy_test.go index fc50dea5dd..240fd6b1a6 100644 --- a/go-controller/pkg/ovn/multipolicy_test.go +++ b/go-controller/pkg/ovn/multipolicy_test.go @@ -91,9 +91,9 @@ func convertNetPolicyToMultiNetPolicy(policy *knet.NetworkPolicy) *mnpapi.MultiN return &mpolicy } -func addPodNetwork(pod *corev1.Pod, secondaryPodInfos map[string]*secondaryPodInfo) { +func addPodNetwork(pod *corev1.Pod, udnPodInfos map[string]*udnPodInfo) { nadNames := []string{} - for _, podInfo := range secondaryPodInfos { + for _, podInfo := range udnPodInfos { for nadName := range podInfo.allportInfo { nadNames = append(nadNames, nadName) } @@ -104,11 +104,11 @@ func addPodNetwork(pod *corev1.Pod, secondaryPodInfos map[string]*secondaryPodIn pod.Annotations[nettypes.NetworkAttachmentAnnot] = strings.Join(nadNames, ",") } -func (p testPod) populateSecondaryNetworkLogicalSwitchCache(ocInfo secondaryControllerInfo) { +func (p testPod) populateUserDefinedNetworkLogicalSwitchCache(ocInfo userDefinedNetworkControllerInfo) { var err error switch ocInfo.bnc.TopologyType() { case ovntypes.Layer3Topology: - podInfo := p.secondaryPodInfos[ocInfo.bnc.GetNetworkName()] + podInfo := p.udnPodInfos[ocInfo.bnc.GetNetworkName()] err = ocInfo.bnc.lsManager.AddOrUpdateSwitch(ocInfo.bnc.GetNetworkScopedName(p.nodeName), []*net.IPNet{ovntest.MustParseIPNet(podInfo.nodeSubnet)}, nil) case ovntypes.Layer2Topology: subnet := ocInfo.bnc.Subnets()[0] @@ -120,13 +120,13 @@ func (p testPod) populateSecondaryNetworkLogicalSwitchCache(ocInfo secondaryCont gomega.Expect(err).NotTo(gomega.HaveOccurred()) } -func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods []testPod, 
netInfo util.NetInfo) []libovsdb.TestData { +func getExpectedDataPodsAndSwitchesForUserDefinedNetwork(fakeOvn *FakeOVN, pods []testPod, netInfo util.NetInfo) []libovsdb.TestData { data := []libovsdb.TestData{} - for _, ocInfo := range fakeOvn.secondaryControllers { + for _, ocInfo := range fakeOvn.userDefinedNetworkControllers { nodeslsps := make(map[string][]string) var switchName string for _, pod := range pods { - podInfo, ok := pod.secondaryPodInfos[ocInfo.bnc.GetNetworkName()] + podInfo, ok := pod.udnPodInfos[ocInfo.bnc.GetNetworkName()] if !ok { continue } @@ -187,15 +187,15 @@ func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods [] var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { const ( - namespaceName1 = "namespace1" - namespaceName2 = "namespace2" - netPolicyName1 = "networkpolicy1" - nodeName = "node1" - secondaryNetworkName = "network1" - nadName = "nad1" - labelName string = "pod-name" - labelVal string = "server" - portNum int32 = 81 + namespaceName1 = "namespace1" + namespaceName2 = "namespace2" + netPolicyName1 = "networkpolicy1" + nodeName = "node1" + userDefinedNetworkName = "network1" + nadName = "nad1" + labelName string = "pod-name" + labelVal string = "server" + portNum int32 = 81 ) var ( app *cli.App @@ -238,14 +238,14 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { format.MaxLength = gomegaFormatMaxLength }) - // setSecondaryNetworkTestData sets relevant test data (NAD, NetInfo & NB DB - // initial data) assuming a secondary network of the given topoloy and + // setUserDefinedNetworkTestData sets relevant test data (NAD, NetInfo & NB DB + // initial data) assuming a user-defined network of the given topology and // subnet - setSecondaryNetworkTestData := func(topology, subnets string) { + setUserDefinedNetworkTestData := func(topology, subnets string) { nadNamespacedName = util.GetNADName(namespaceName1, nadName) netconf := ovncnitypes.NetConf{ NetConf: cnitypes.NetConf{ 
- Name: secondaryNetworkName, + Name: userDefinedNetworkName, Type: "ovn-k8s-cni-overlay", }, Topology: topology, @@ -278,7 +278,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { Name: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch), UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch) + "_UUID", ExternalIDs: map[string]string{ - ovntypes.NetworkExternalID: secondaryNetworkName, + ovntypes.NetworkExternalID: userDefinedNetworkName, ovntypes.NetworkRoleExternalID: getNetworkRole(netInfo), }, }) @@ -287,7 +287,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { Name: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch), UUID: netInfo.GetNetworkScopedName(ovntypes.OVNLocalnetSwitch) + "_UUID", ExternalIDs: map[string]string{ - ovntypes.NetworkExternalID: secondaryNetworkName, + ovntypes.NetworkExternalID: userDefinedNetworkName, ovntypes.NetworkRoleExternalID: getNetworkRole(netInfo), }, }) @@ -303,7 +303,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { if len(podLabels) > 0 { knetPod.Labels = podLabels } - addPodNetwork(knetPod, testPod.secondaryPodInfos) + addPodNetwork(knetPod, testPod.udnPodInfos) setPodAnnotations(knetPod, testPod) podsList = append(podsList, *knetPod) } @@ -357,13 +357,13 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { err = fakeOvn.controller.WatchNetworkPolicy() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ocInfo, ok := fakeOvn.secondaryControllers[secondaryNetworkName] + ocInfo, ok := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] gomega.Expect(ok).To(gomega.BeTrue()) asf := ocInfo.asf gomega.Expect(asf).NotTo(gomega.BeNil()) - gomega.Expect(asf.ControllerName).To(gomega.Equal(getNetworkControllerName(secondaryNetworkName))) + gomega.Expect(asf.ControllerName).To(gomega.Equal(getNetworkControllerName(userDefinedNetworkName))) - for _, ocInfo := range fakeOvn.secondaryControllers { + for _, ocInfo := range 
fakeOvn.userDefinedNetworkControllers { // localnet topology can't watch for nodes if watchNodes && ocInfo.bnc.TopologyType() != ovntypes.LocalnetTopology { if ocInfo.bnc.TopologyType() == ovntypes.Layer3Topology && config.OVNKubernetesFeature.EnableInterconnect { @@ -385,7 +385,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { } for _, testPod := range pods { - testPod.populateSecondaryNetworkLogicalSwitchCache(ocInfo) + testPod.populateUserDefinedNetworkLogicalSwitchCache(ocInfo) } if pods != nil { err = ocInfo.bnc.WatchPods() @@ -399,9 +399,9 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { getUpdatedInitialDB := func(tPods []testPod) []libovsdb.TestData { updatedSwitchAndPods := getDefaultNetExpectedPodsAndSwitches(tPods, []string{nodeName}) - secondarySwitchAndPods := getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn, tPods, netInfo) - if len(secondarySwitchAndPods) != 0 { - updatedSwitchAndPods = append(updatedSwitchAndPods, secondarySwitchAndPods...) + udnSwitchesAndPods := getExpectedDataPodsAndSwitchesForUserDefinedNetwork(fakeOvn, tPods, netInfo) + if len(udnSwitchesAndPods) != 0 { + updatedSwitchAndPods = append(updatedSwitchAndPods, udnSwitchesAndPods...) } return append(getHairpinningACLsV4AndPortGroup(), updatedSwitchAndPods...) 
} @@ -413,7 +413,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { topology := ovntypes.Layer2Topology subnets := "10.1.0.0/24" - setSecondaryNetworkTestData(topology, subnets) + setUserDefinedNetworkTestData(topology, subnets) namespace1 := *newNamespace(namespaceName1) namespace2 := *newNamespace(namespaceName2) @@ -436,7 +436,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { Get(context.TODO(), mpolicy.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ocInfo := fakeOvn.secondaryControllers[secondaryNetworkName] + ocInfo := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] ocInfo.asf.EventuallyExpectEmptyAddressSetExist(namespaceName1) ocInfo.asf.EventuallyExpectEmptyAddressSetExist(namespaceName2) @@ -459,11 +459,11 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { topology := ovntypes.Layer2Topology subnets := "10.1.0.0/24" - setSecondaryNetworkTestData(topology, subnets) + setUserDefinedNetworkTestData(topology, subnets) namespace1 := *newNamespace(namespaceName1) nPodTest := getTestPod(namespace1.Name, nodeName) - nPodTest.addNetwork(secondaryNetworkName, nadNamespacedName, "", "", "", "10.1.1.1", "0a:58:0a:01:01:01", "secondary", 1, nil) + nPodTest.addNetwork(userDefinedNetworkName, nadNamespacedName, "", "", "", "10.1.1.1", "0a:58:0a:01:01:01", "secondary", 1, nil) networkPolicy := getPortNetworkPolicy(netPolicyName1, namespace1.Name, labelName, labelVal, portNum) watchNodes := false @@ -504,8 +504,8 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { Get(context.TODO(), mpolicy.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ocInfo := fakeOvn.secondaryControllers[secondaryNetworkName] - portInfo := nPodTest.getNetworkPortInfo(secondaryNetworkName, nadNamespacedName) + ocInfo := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] + portInfo := 
nPodTest.getNetworkPortInfo(userDefinedNetworkName, nadNamespacedName) gomega.Expect(portInfo).NotTo(gomega.BeNil()) ocInfo.asf.ExpectAddressSetWithAddresses(namespaceName1, []string{portInfo.podIP}) @@ -551,7 +551,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { nodeSubnet = "10.1.1.0/24" } - setSecondaryNetworkTestData(topology, subnets) // here I set network role if layer2 + setUserDefinedNetworkTestData(topology, subnets) // here I set network role if layer2 watchNodes := true node := *newNode(nodeName, "192.168.126.202/24") @@ -561,7 +561,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { node.Annotations, err = util.UpdateNodeHostSubnetAnnotation( node.Annotations, ovntest.MustParseIPNets(nodeSubnet), - secondaryNetworkName, + userDefinedNetworkName, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -574,7 +574,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { node.Annotations, err = util.UpdateNetworkIDAnnotation(node.Annotations, ovntypes.DefaultNetworkName, 0) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if topology != ovntypes.LocalnetTopology { - node.Annotations, err = util.UpdateNetworkIDAnnotation(node.Annotations, secondaryNetworkName, 2) + node.Annotations, err = util.UpdateNetworkIDAnnotation(node.Annotations, userDefinedNetworkName, 2) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } @@ -585,7 +585,7 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { startOvn(initialDB, watchNodes, []corev1.Node{node}, []corev1.Namespace{namespace1}, nil, nil, []nettypes.NetworkAttachmentDefinition{*nad}, []testPod{}, map[string]string{labelName: labelVal}) - ocInfo := fakeOvn.secondaryControllers[secondaryNetworkName] + ocInfo := fakeOvn.userDefinedNetworkControllers[userDefinedNetworkName] // check that the node zone is tracked as expected if topology != ovntypes.LocalnetTopology { @@ -596,12 +596,12 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy 
Operations", func() { ocInfo.asf.EventuallyExpectEmptyAddressSetExist(namespaceName1) nPodTest := getTestPod(namespace1.Name, nodeName) - nPodTest.addNetwork(secondaryNetworkName, nadNamespacedName, nodeSubnet, "", "", "10.1.1.1", "0a:58:0a:01:01:01", "secondary", 1, nil) + nPodTest.addNetwork(userDefinedNetworkName, nadNamespacedName, nodeSubnet, "", "", "10.1.1.1", "0a:58:0a:01:01:01", "secondary", 1, nil) knetPod := newPod(nPodTest.namespace, nPodTest.podName, nPodTest.nodeName, nPodTest.podIP) - addPodNetwork(knetPod, nPodTest.secondaryPodInfos) + addPodNetwork(knetPod, nPodTest.udnPodInfos) setPodAnnotations(knetPod, nPodTest) nPodTest.populateLogicalSwitchCache(fakeOvn) - nPodTest.populateSecondaryNetworkLogicalSwitchCache(ocInfo) + nPodTest.populateUserDefinedNetworkLogicalSwitchCache(ocInfo) ginkgo.By("Creating a pod attached to the secondary network") _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(nPodTest.namespace).Create(context.TODO(), knetPod, metav1.CreateOptions{}) @@ -609,8 +609,8 @@ var _ = ginkgo.Describe("OVN MultiNetworkPolicy Operations", func() { if topology == ovntypes.Layer2Topology && remote { // add the transit switch port bindings on behalf of ovn-controller - // so that the added pod is eventually processed succesfuly - transistSwitchPortName := util.GetSecondaryNetworkLogicalPortName(nPodTest.namespace, nPodTest.podName, nadNamespacedName) + // so that the added pod is eventually processed successfully + transistSwitchPortName := util.GetUserDefinedNetworkLogicalPortName(nPodTest.namespace, nPodTest.podName, nadNamespacedName) transistSwitchName := netInfo.GetNetworkScopedName(ovntypes.OVNLayer2Switch) err = libovsdb.CreateTransitSwitchPortBindings(fakeOvn.sbClient, transistSwitchName, transistSwitchPortName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/ovn_test.go b/go-controller/pkg/ovn/ovn_test.go index 0a1b9e3c8f..8c0fbcd54e 100644 --- a/go-controller/pkg/ovn/ovn_test.go +++ 
b/go-controller/pkg/ovn/ovn_test.go @@ -67,8 +67,8 @@ const ( ovnClusterPortGroupUUID = fakePgUUID ) -type secondaryControllerInfo struct { - bnc *BaseSecondaryNetworkController +type userDefinedNetworkControllerInfo struct { + bnc *BaseUserDefinedNetworkController asf *addressset.FakeAddressSetFactory } @@ -91,9 +91,9 @@ type FakeOVN struct { eIPController *EgressIPController portCache *PortCache - // information map of all secondary network controllers - secondaryControllers map[string]secondaryControllerInfo - fullSecondaryL2Controllers map[string]*SecondaryLayer2NetworkController + // information map of all UDN controllers + userDefinedNetworkControllers map[string]userDefinedNetworkControllerInfo + fullL2UDNControllers map[string]*Layer2UserDefinedNetworkController } // NOTE: the FakeAddressSetFactory is no longer needed and should no longer be used. starting to phase out FakeAddressSetFactory @@ -109,8 +109,8 @@ func NewFakeOVN(useFakeAddressSet bool) *FakeOVN { egressSVCWg: &sync.WaitGroup{}, anpWg: &sync.WaitGroup{}, - secondaryControllers: map[string]secondaryControllerInfo{}, - fullSecondaryL2Controllers: map[string]*SecondaryLayer2NetworkController{}, + userDefinedNetworkControllers: map[string]userDefinedNetworkControllerInfo{}, + fullL2UDNControllers: map[string]*Layer2UserDefinedNetworkController{}, } } @@ -196,7 +196,7 @@ func (o *FakeOVN) shutdown() { o.egressSVCWg.Wait() o.anpWg.Wait() o.nbsbCleanup.Cleanup() - for _, ocInfo := range o.secondaryControllers { + for _, ocInfo := range o.userDefinedNetworkControllers { close(ocInfo.bnc.stopChan) ocInfo.bnc.cancelableCtx.Cancel() ocInfo.bnc.wg.Wait() @@ -266,7 +266,7 @@ func (o *FakeOVN) init(nadList []nettypes.NetworkAttachmentDefinition) { setupCOPP := false setupClusterController(o.controller, setupCOPP) for _, n := range nadList { - err := o.NewSecondaryNetworkController(&n) + err := o.NewUserDefinedNetworkController(&n) gomega.Expect(err).NotTo(gomega.HaveOccurred()) } @@ -280,9 +280,9 @@ func (o 
*FakeOVN) init(nadList []nettypes.NetworkAttachmentDefinition) { if err == nil { for _, node := range existingNodes { o.controller.localZoneNodes.Store(node.Name, true) - for _, secondaryController := range o.secondaryControllers { - if secondaryController.bnc.localZoneNodes != nil { - secondaryController.bnc.localZoneNodes.Store(node.Name, true) + for _, udnController := range o.userDefinedNetworkControllers { + if udnController.bnc.localZoneNodes != nil { + udnController.bnc.localZoneNodes.Store(node.Name, true) } } } @@ -497,9 +497,9 @@ func newNetworkAttachmentDefinition(namespace, name string, netconf ovncnitypes. }, nil } -func (o *FakeOVN) NewSecondaryNetworkController(netattachdef *nettypes.NetworkAttachmentDefinition) error { - var ocInfo secondaryControllerInfo - var secondaryController *BaseSecondaryNetworkController +func (o *FakeOVN) NewUserDefinedNetworkController(netattachdef *nettypes.NetworkAttachmentDefinition) error { + var ocInfo userDefinedNetworkControllerInfo + var userDefinedNetworkController *BaseUserDefinedNetworkController var ok bool nadName := util.GetNADName(netattachdef.Namespace, netattachdef.Name) @@ -509,7 +509,7 @@ func (o *FakeOVN) NewSecondaryNetworkController(netattachdef *nettypes.NetworkAt } netName := nInfo.GetNetworkName() topoType := nInfo.TopologyType() - ocInfo, ok = o.secondaryControllers[netName] + ocInfo, ok = o.userDefinedNetworkControllers[netName] if !ok { nbZoneFailed := false // Try to get the NBZone. If there is an error, create NB_Global record. 
@@ -548,31 +548,31 @@ func (o *FakeOVN) NewSecondaryNetworkController(netattachdef *nettypes.NetworkAt switch topoType { case types.Layer3Topology: - l3Controller, err := NewSecondaryLayer3NetworkController(cnci, nInfo, o.networkManager.Interface(), nil, o.eIPController, o.portCache) + l3Controller, err := NewLayer3UserDefinedNetworkController(cnci, nInfo, o.networkManager.Interface(), nil, o.eIPController, o.portCache) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if o.asf != nil { // use fake asf only when enabled l3Controller.addressSetFactory = asf } - secondaryController = &l3Controller.BaseSecondaryNetworkController + userDefinedNetworkController = &l3Controller.BaseUserDefinedNetworkController case types.Layer2Topology: - l2Controller, err := NewSecondaryLayer2NetworkController(cnci, nInfo, o.networkManager.Interface(), nil, o.portCache, o.eIPController) + l2Controller, err := NewLayer2UserDefinedNetworkController(cnci, nInfo, o.networkManager.Interface(), nil, o.portCache, o.eIPController) gomega.Expect(err).NotTo(gomega.HaveOccurred()) if o.asf != nil { // use fake asf only when enabled l2Controller.addressSetFactory = asf } - secondaryController = &l2Controller.BaseSecondaryNetworkController - o.fullSecondaryL2Controllers[netName] = l2Controller + userDefinedNetworkController = &l2Controller.BaseUserDefinedNetworkController + o.fullL2UDNControllers[netName] = l2Controller case types.LocalnetTopology: - localnetController := NewSecondaryLocalnetNetworkController(cnci, nInfo, o.networkManager.Interface()) + localnetController := NewLocalnetUserDefinedNetworkController(cnci, nInfo, o.networkManager.Interface()) if o.asf != nil { // use fake asf only when enabled localnetController.addressSetFactory = asf } - secondaryController = &localnetController.BaseSecondaryNetworkController + userDefinedNetworkController = &localnetController.BaseUserDefinedNetworkController default: return fmt.Errorf("topology type %s not supported", topoType) } - ocInfo = 
secondaryControllerInfo{bnc: secondaryController, asf: asf} - o.secondaryControllers[netName] = ocInfo + ocInfo = userDefinedNetworkControllerInfo{bnc: userDefinedNetworkController, asf: asf} + o.userDefinedNetworkControllers[netName] = ocInfo if nbZoneFailed { // Delete the NBGlobal row as this function created it. Otherwise many tests would fail while @@ -581,13 +581,14 @@ func (o *FakeOVN) NewSecondaryNetworkController(netattachdef *nettypes.NetworkAt gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } else { - secondaryController = ocInfo.bnc + userDefinedNetworkController = ocInfo.bnc } - ginkgo.By(fmt.Sprintf("OVN test init: add NAD %s to secondary network controller of %s network %s", nadName, topoType, netName)) - mutableNetInfo := util.NewMutableNetInfo(secondaryController.GetNetInfo()) + ginkgo.By(fmt.Sprintf("OVN test init: add NAD %s to user-defined network controller of %s network %s", nadName, topoType, netName)) + mutableNetInfo := util.NewMutableNetInfo(userDefinedNetworkController.GetNetInfo()) mutableNetInfo.AddNADs(nadName) - _ = util.ReconcileNetInfo(secondaryController.ReconcilableNetInfo, mutableNetInfo) + err = util.ReconcileNetInfo(userDefinedNetworkController.ReconcilableNetInfo, mutableNetInfo) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return nil } diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index c59a841d75..76383189a5 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -224,10 +224,10 @@ type testPod struct { noIfaceIdVer bool networkRole string - secondaryPodInfos map[string]*secondaryPodInfo + udnPodInfos map[string]*udnPodInfo } -type secondaryPodInfo struct { +type udnPodInfo struct { nodeSubnet string nodeMgtIP string nodeGWIP string @@ -248,18 +248,18 @@ type portInfo struct { func newTPod(nodeName, nodeSubnet, nodeMgtIP, nodeGWIP, podName, podIPs, podMAC, namespace string) testPod { portName := util.GetLogicalPortName(namespace, podName) to := 
testPod{ - portUUID: portName + "-UUID", - nodeSubnet: nodeSubnet, - nodeMgtIP: nodeMgtIP, - nodeGWIP: nodeGWIP, - podIP: podIPs, - podMAC: podMAC, - portName: portName, - nodeName: nodeName, - podName: podName, - namespace: namespace, - secondaryPodInfos: map[string]*secondaryPodInfo{}, - networkRole: ovntypes.NetworkRolePrimary, // all tests here run with network-segmentation disabled by default by default + portUUID: portName + "-UUID", + nodeSubnet: nodeSubnet, + nodeMgtIP: nodeMgtIP, + nodeGWIP: nodeGWIP, + podIP: podIPs, + podMAC: podMAC, + portName: portName, + nodeName: nodeName, + podName: podName, + namespace: namespace, + udnPodInfos: map[string]*udnPodInfo{}, + networkRole: ovntypes.NetworkRolePrimary, // all tests here run with network-segmentation disabled by default } var routeSources []*net.IPNet @@ -393,11 +393,11 @@ func (p testPod) getAnnotationsJson() string { }, } - for _, portInfos := range p.secondaryPodInfos { - var secondaryIfaceRoutes []podRoute + for _, portInfos := range p.udnPodInfos { + var udnIfaceRoutes []podRoute for _, route := range portInfos.routes { - secondaryIfaceRoutes = append( - secondaryIfaceRoutes, + udnIfaceRoutes = append( + udnIfaceRoutes, podRoute{Dest: route.Dest.String(), NextHop: route.NextHop.String()}, ) } @@ -416,7 +416,7 @@ func (p testPod) getAnnotationsJson() string { IPs: []string{ip}, TunnelID: portInfo.tunnelID, Role: portInfos.role, - Routes: secondaryIfaceRoutes, + Routes: udnIfaceRoutes, } if portInfos.nodeGWIP != "" { podAnnotation.Gateway = portInfos.nodeGWIP @@ -458,7 +458,7 @@ func getExpectedDataPodsSwitchesPortGroup(netInfo util.NetInfo, pods []testPod, if netInfo.IsDefault() { portName = util.GetLogicalPortName(pod.namespace, pod.podName) } else { - portName = util.GetSecondaryNetworkLogicalPortName(pod.namespace, pod.podName, netInfo.GetNADs()[0]) + portName = util.GetUserDefinedNetworkLogicalPortName(pod.namespace, pod.podName, netInfo.GetNADs()[0]) } var lspUUID string if
len(pod.portUUID) == 0 { diff --git a/go-controller/pkg/ovn/port_cache.go b/go-controller/pkg/ovn/port_cache.go index 4148e840a5..9dbee646e3 100644 --- a/go-controller/pkg/ovn/port_cache.go +++ b/go-controller/pkg/ovn/port_cache.go @@ -47,7 +47,7 @@ func (c *PortCache) get(pod *corev1.Pod, nadName string) (*lpInfo, error) { if nadName == types.DefaultNetworkName { logicalPort = util.GetLogicalPortName(pod.Namespace, pod.Name) } else { - logicalPort = util.GetSecondaryNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) + logicalPort = util.GetUserDefinedNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) } c.RLock() defer c.RUnlock() @@ -82,7 +82,7 @@ func (c *PortCache) add(pod *corev1.Pod, logicalSwitch, nadName, uuid string, ma if nadName == types.DefaultNetworkName { logicalPort = util.GetLogicalPortName(pod.Namespace, pod.Name) } else { - logicalPort = util.GetSecondaryNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) + logicalPort = util.GetUserDefinedNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) } c.Lock() defer c.Unlock() @@ -112,7 +112,7 @@ func (c *PortCache) remove(pod *corev1.Pod, nadName string) { if nadName == types.DefaultNetworkName { logicalPort = util.GetLogicalPortName(pod.Namespace, pod.Name) } else { - logicalPort = util.GetSecondaryNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) + logicalPort = util.GetUserDefinedNetworkLogicalPortName(pod.Namespace, pod.Name, nadName) } c.Lock() diff --git a/go-controller/pkg/ovn/topology/topologyfactory.go b/go-controller/pkg/ovn/topology/topologyfactory.go index b20743a242..45738cf85f 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory.go +++ b/go-controller/pkg/ovn/topology/topologyfactory.go @@ -55,7 +55,7 @@ func (gtf *GatewayTopologyFactory) newClusterRouter( Options: routerOptions, Copp: &coopUUID, } - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { logicalRouter.ExternalIDs[types.NetworkExternalID] = netInfo.GetNetworkName() 
logicalRouter.ExternalIDs[types.TopologyExternalID] = netInfo.TopologyType() } @@ -84,7 +84,7 @@ func (gtf *GatewayTopologyFactory) NewJoinSwitch( logicalSwitch := nbdb.LogicalSwitch{ Name: joinSwitchName, } - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { logicalSwitch.ExternalIDs = map[string]string{ types.NetworkExternalID: netInfo.GetNetworkName(), types.TopologyExternalID: netInfo.TopologyType(), @@ -111,7 +111,7 @@ func (gtf *GatewayTopologyFactory) NewJoinSwitch( MAC: gwLRPMAC.String(), Networks: gwLRPNetworks, } - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { logicalRouterPort.ExternalIDs = map[string]string{ types.NetworkExternalID: netInfo.GetNetworkName(), types.TopologyExternalID: netInfo.TopologyType(), diff --git a/go-controller/pkg/ovn/udn_isolation.go b/go-controller/pkg/ovn/udn_isolation.go index 3403b0a9a7..d0182603aa 100644 --- a/go-controller/pkg/ovn/udn_isolation.go +++ b/go-controller/pkg/ovn/udn_isolation.go @@ -19,19 +19,31 @@ import ( addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/batching" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) const ( // UDN ACL names, should be unique across all controllers // Default network-only ACLs: - AllowHostARPACL = "AllowHostARPSecondary" - AllowHostSecondaryACL = "AllowHostSecondary" - DenySecondaryACL = "DenySecondary" + allowHostARPACL = "AllowHostARPPrimaryUDN" + allowHostPrimaryUDNACL = "AllowHostPrimaryUDN" + denyPrimaryUDNACL = "DenyPrimaryUDN" // OpenPortACLPrefix is used to build per-pod ACLs, pod name should be added to the prefix to build a unique name OpenPortACLPrefix = "OpenPort-" // the same tier is used for all UDN isolation ACLs isolationTier = types.PrimaryACLTier + + // Port Group ID for pods with primary UDN 
+ // Note, this is left with wording "Secondary" because we do not currently allow + // mutating a port group's name. ACL match criteria may reference this name, so it + // is unsafe to update. Therefore we keep the legacy name for now. + legacySecondaryPodPGName = "SecondaryPods" + + // deprecated Legacy versions + allowHostSecondaryACL = "AllowHostSecondary" + denySecondaryACL = "DenySecondary" + legacyAllowHostARPACL = "AllowHostARPSecondary" ) // setupUDNACLs should be called after the node's management port was configured @@ -40,7 +52,7 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { if !util.IsNetworkSegmentationSupportEnabled() { return nil } - // add port group to track secondary pods + // add port group to track UDN primary pods pgIDs := oc.getSecondaryPodsPortGroupDbIDs() pg := &nbdb.PortGroup{ Name: libovsdbutil.GetPortGroupName(pgIDs), @@ -63,7 +75,7 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { // - ingress -> allow-related all from mgmtPort // - egress+ingress -> deny everything else pgName := libovsdbutil.GetPortGroupName(pgIDs) - egressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) + egressDenyIDs := oc.getUDNACLDbIDs(denyPrimaryUDNACL, libovsdbutil.ACLEgress) match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) egressDenyACL := libovsdbutil.BuildACL(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportEgress, isolationTier) @@ -90,22 +102,22 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { return match } - egressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLEgress) + egressARPIDs := oc.getUDNACLDbIDs(allowHostARPACL, libovsdbutil.ACLEgress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLEgress), libovsdbutil.ACLEgress) egressARPACL := libovsdbutil.BuildACL(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, 
nil, libovsdbutil.LportEgress, isolationTier) - ingressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLIngress) + ingressDenyIDs := oc.getUDNACLDbIDs(denyPrimaryUDNACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLIngress) ingressDenyACL := libovsdbutil.BuildACL(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportIngress, isolationTier) - ingressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLIngress) + ingressARPIDs := oc.getUDNACLDbIDs(allowHostARPACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLIngress), libovsdbutil.ACLIngress) ingressARPACL := libovsdbutil.BuildACL(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportIngress, isolationTier) - ingressAllowIDs := oc.getUDNACLDbIDs(AllowHostSecondaryACL, libovsdbutil.ACLIngress) + ingressAllowIDs := oc.getUDNACLDbIDs(allowHostPrimaryUDNACL, libovsdbutil.ACLIngress) match = "(" for i, mgmtPortIP := range mgmtPortIPs { ipFamily := "ip4" @@ -140,7 +152,7 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { func (oc *DefaultNetworkController) getSecondaryPodsPortGroupDbIDs() *libovsdbops.DbObjectIDs { return libovsdbops.NewDbObjectIDs(libovsdbops.PortGroupUDN, oc.controllerName, map[libovsdbops.ExternalIDKey]string{ - libovsdbops.ObjectNameKey: "SecondaryPods", + libovsdbops.ObjectNameKey: legacySecondaryPodPGName, }) } @@ -415,3 +427,43 @@ func (bnc *BaseNetworkController) deleteAdvertisedNetworkIsolation(nodeName stri _, err = libovsdbops.TransactAndCheck(bnc.nbClient, ops) return err } + +func (oc *DefaultNetworkController) syncUDNIsolation() error { + // Find ACLs with old "secondary" naming IDs, update them + type aclUpdate struct { + old *libovsdbops.DbObjectIDs + new *libovsdbops.DbObjectIDs + } + updates := []*aclUpdate{ + {oc.getUDNACLDbIDs(denySecondaryACL, 
libovsdbutil.ACLEgress), oc.getUDNACLDbIDs(denyPrimaryUDNACL, libovsdbutil.ACLEgress)}, + {oc.getUDNACLDbIDs(legacyAllowHostARPACL, libovsdbutil.ACLEgress), oc.getUDNACLDbIDs(allowHostARPACL, libovsdbutil.ACLEgress)}, + {oc.getUDNACLDbIDs(denySecondaryACL, libovsdbutil.ACLIngress), oc.getUDNACLDbIDs(denyPrimaryUDNACL, libovsdbutil.ACLIngress)}, + {oc.getUDNACLDbIDs(legacyAllowHostARPACL, libovsdbutil.ACLIngress), oc.getUDNACLDbIDs(allowHostARPACL, libovsdbutil.ACLIngress)}, + {oc.getUDNACLDbIDs(allowHostSecondaryACL, libovsdbutil.ACLIngress), oc.getUDNACLDbIDs(allowHostPrimaryUDNACL, libovsdbutil.ACLIngress)}, + } + + aclsToUpdate := make([]*nbdb.ACL, 0) + for _, update := range updates { + legacyACLs, err := libovsdbops.FindACLsWithPredicate(oc.nbClient, libovsdbops.GetPredicate[*nbdb.ACL](update.old, nil)) + if err != nil { + return fmt.Errorf("unable to find ACLs for UDN Isolation sync: %w", err) + } + for _, acl := range legacyACLs { + externalIDs := update.new.GetExternalIDs() + acl.ExternalIDs = externalIDs + aclName := libovsdbutil.GetACLName(update.new) + acl.Name = &aclName + aclsToUpdate = append(aclsToUpdate, acl) + } + } + if len(aclsToUpdate) > 0 { + err := batching.Batch[*nbdb.ACL](20000, aclsToUpdate, func(batchACLs []*nbdb.ACL) error { + return libovsdbops.CreateOrUpdateACLs(oc.nbClient, oc.GetSamplingConfig(), batchACLs...) 
+ }) + if err != nil { + return fmt.Errorf("failed to create or update UDN ACLs: %w", err) + } + } + + return nil +} diff --git a/go-controller/pkg/ovn/udn_isolation_test.go b/go-controller/pkg/ovn/udn_isolation_test.go index 2b3afda328..b699ecbdd5 100644 --- a/go-controller/pkg/ovn/udn_isolation_test.go +++ b/go-controller/pkg/ovn/udn_isolation_test.go @@ -25,7 +25,7 @@ var _ = Describe("UDN Isolation", func() { // build port group with one ACL that has default tier pgIDs := fakeController.getSecondaryPodsPortGroupDbIDs() pgName := libovsdbutil.GetPortGroupName(pgIDs) - egressDenyIDs := fakeController.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) + egressDenyIDs := fakeController.getUDNACLDbIDs(denyPrimaryUDNACL, libovsdbutil.ACLEgress) match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) // in the real code we use BuildACL here instead of BuildACLWithDefaultTier egressDenyACL := libovsdbutil.BuildACLWithDefaultTier(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, @@ -50,4 +50,43 @@ var _ = Describe("UDN Isolation", func() { Expect(acls).To(HaveLen(1)) Expect(acls[0].Tier).To(Equal(types.PrimaryACLTier)) }) + + It("Should handle syncing legacy DBIDs", func() { + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + fakeController := getFakeController(DefaultNetworkControllerName) + + By("initializing the database with legacy secondary IDs") + pgIDs := fakeController.getSecondaryPodsPortGroupDbIDs() + pgName := libovsdbutil.GetPortGroupName(pgIDs) + egressDenyIDs := fakeController.getUDNACLDbIDs(denySecondaryACL, libovsdbutil.ACLEgress) + match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) + egressDenyACL := libovsdbutil.BuildACL(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportEgress, isolationTier) + // required to make sure port group correctly references the ACL + egressDenyACL.UUID 
= egressDenyIDs.String() + "-UUID" + + pg := libovsdbutil.BuildPortGroup(pgIDs, nil, []*nbdb.ACL{egressDenyACL}) + + nbClient, nbCleanup, err := libovsdbtest.NewNBTestHarness(libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{egressDenyACL, pg}, + }, nil) + Expect(err).NotTo(HaveOccurred()) + defer nbCleanup.Cleanup() + fakeController.nbClient = nbClient + By("running UDN Isolation sync to update ACLs") + Expect(fakeController.syncUDNIsolation()).To(Succeed()) + By("expect updated port group with proper external_ids") + pgs, err := libovsdbops.FindPortGroupsWithPredicate(nbClient, func(_ *nbdb.PortGroup) bool { return true }) + Expect(err).NotTo(HaveOccurred()) + Expect(pgs).To(HaveLen(1)) + Expect(pgs[0].ExternalIDs).To(Equal(fakeController.getSecondaryPodsPortGroupDbIDs().GetExternalIDs())) + By("expect updated ACL with proper external_ids") + acls, err := libovsdbops.FindACLsWithPredicate(nbClient, func(_ *nbdb.ACL) bool { return true }) + Expect(err).NotTo(HaveOccurred()) + Expect(acls).To(HaveLen(1)) + Expect(acls[0].ExternalIDs).To(Equal(fakeController.getUDNACLDbIDs(denyPrimaryUDNACL, libovsdbutil.ACLEgress).GetExternalIDs())) + By("expect updated ACL with proper name") + Expect(*acls[0].Name).To(BeEmpty()) + }) }) diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index 4c144d65cc..1549bf5481 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -34,7 +34,7 @@ const ( /* * ZoneInterconnectHandler manages OVN resources required for interconnecting * multiple zones. This handler exposes functions which a network controller - * (default and secondary) is expected to call on different events. + * (default and UDN) is expected to call on different events. 
* For routed topologies: * @@ -118,7 +118,7 @@ const ( */ // ZoneInterconnectHandler creates the OVN resources required for interconnecting -// multiple zones for a network (default or secondary layer 3) +// multiple zones for a network (default or layer 3) UDN type ZoneInterconnectHandler struct { watchFactory *factory.WatchFactory // network which is inter-connected @@ -156,8 +156,8 @@ func getTransitSwitchName(nInfo util.NetInfo) string { func (zic *ZoneInterconnectHandler) createOrUpdateTransitSwitch(networkID int) error { externalIDs := make(map[string]string) - if zic.IsSecondary() { - externalIDs = getSecondaryNetTransitSwitchExtIDs(zic.GetNetworkName(), zic.TopologyType(), zic.IsPrimaryNetwork()) + if zic.IsUserDefinedNetwork() { + externalIDs = getUserDefinedNetTransitSwitchExtIDs(zic.GetNetworkName(), zic.TopologyType(), zic.IsPrimaryNetwork()) } ts := &nbdb.LogicalSwitch{ Name: zic.networkTransitSwitchName, @@ -238,7 +238,7 @@ func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error { var nodeGRPIPs []*net.IPNet // only primary networks have cluster router connected to join switch+GR // used for adding routes to GR - if !zic.IsSecondary() || (util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { + if !zic.IsUserDefinedNetwork() || (util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { nodeGRPIPs, err = udn.GetGWRouterIPs(node, zic.GetNetInfo()) if err != nil { if util.IsAnnotationNotSetError(err) { @@ -647,8 +647,8 @@ func (zic *ZoneInterconnectHandler) deleteLocalNodeStaticRoutes(node *corev1.Nod } } - if zic.IsSecondary() { - // Secondary network cluster router doesn't connect to a join switch + if zic.IsUserDefinedNetwork() { + // UDN cluster router doesn't connect to a join switch // or to a Gateway router. 
return nil } @@ -719,7 +719,7 @@ func (zic *ZoneInterconnectHandler) getStaticRoutes(ipPrefixes []*net.IPNet, nex return staticRoutes } -func getSecondaryNetTransitSwitchExtIDs(networkName, topology string, isPrimaryUDN bool) map[string]string { +func getUserDefinedNetTransitSwitchExtIDs(networkName, topology string, isPrimaryUDN bool) map[string]string { return map[string]string{ types.NetworkExternalID: networkName, types.NetworkRoleExternalID: util.GetUserDefinedNetworkRole(isPrimaryUDN), diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go index 0b6570173f..5f7211b9cf 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go @@ -72,7 +72,7 @@ func getNetworkScopedName(netName, name string) string { if netName == types.DefaultNetworkName { return name } - return fmt.Sprintf("%s%s", util.GetSecondaryNetworkPrefix(netName), name) + return fmt.Sprintf("%s%s", util.GetUserDefinedNetworkPrefix(netName), name) } func invokeICHandlerAddNodeFunction(zone string, icHandler *ZoneInterconnectHandler, nodes ...*corev1.Node) error { diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index 523da8e27b..20fdf23d31 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -225,19 +225,19 @@ const ( // RequiredUDNNamespaceLabel is the required namespace label for enabling primary UDNs RequiredUDNNamespaceLabel = "k8s.ovn.org/primary-user-defined-network" - // different secondary network topology type defined in CNI netconf + // different user-defined network topology types defined in CNI netconf Layer3Topology = "layer3" Layer2Topology = "layer2" LocalnetTopology = "localnet" // different types of network roles - // defined in CNI netconf as a user defined network + // defined in CNI netconf as a user-defined network NetworkRolePrimary = 
"primary" NetworkRoleSecondary = "secondary" NetworkRoleDefault = "default" // NetworkRoleInfrastructure is defined internally by ovnkube to recognize "default" // network's role as an "infrastructure-locked" network - // when a user defined network is the primary network for + // when a user-defined network is the primary network for // the pod which makes "default" network neither primary // nor secondary NetworkRoleInfrastructure = "infrastructure-locked" diff --git a/go-controller/pkg/util/mocks/multinetwork/NetInfo.go b/go-controller/pkg/util/mocks/multinetwork/NetInfo.go index e94a82edd5..42e5808356 100644 --- a/go-controller/pkg/util/mocks/multinetwork/NetInfo.go +++ b/go-controller/pkg/util/mocks/multinetwork/NetInfo.go @@ -651,11 +651,11 @@ func (_m *NetInfo) IsPrimaryNetwork() bool { } // IsSecondary provides a mock function with given fields: -func (_m *NetInfo) IsSecondary() bool { +func (_m *NetInfo) IsUserDefinedNetwork() bool { ret := _m.Called() if len(ret) == 0 { - panic("no return value specified for IsSecondary") + panic("no return value specified for IsUserDefinedNetwork") } var r0 bool diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index 1073954bc4..6a8075f2b9 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -36,7 +36,7 @@ type NetInfo interface { GetNetworkID() int IsDefault() bool IsPrimaryNetwork() bool - IsSecondary() bool + IsUserDefinedNetwork() bool TopologyType() string MTU() int IPMode() (bool, bool) @@ -197,7 +197,7 @@ func copyNetInfo(netInfo NetInfo) any { switch t := netInfo.GetNetInfo().(type) { case *DefaultNetInfo: return t.copy() - case *secondaryNetInfo: + case *userDefinedNetInfo: return t.copy() default: panic(fmt.Errorf("unrecognized type %T", t)) @@ -208,7 +208,7 @@ func reconcilable(netInfo NetInfo) ReconcilableNetInfo { switch t := netInfo.GetNetInfo().(type) { case *DefaultNetInfo: return t - case *secondaryNetInfo: + 
case *userDefinedNetInfo: return t default: panic(fmt.Errorf("unrecognized type %T", t)) @@ -237,7 +237,7 @@ func mutable(netInfo NetInfo) *mutableNetInfo { switch t := netInfo.GetNetInfo().(type) { case *DefaultNetInfo: return &t.mutableNetInfo - case *secondaryNetInfo: + case *userDefinedNetInfo: return &t.mutableNetInfo default: panic(fmt.Errorf("unrecognized type %T", t)) @@ -482,16 +482,16 @@ func (nInfo *DefaultNetInfo) IsDefault() bool { } // IsPrimaryNetwork always returns false for default network. -// The boolean indicates if this secondary network is +// The boolean indicates if the default network is // meant to be the primary network for the pod. Since default -// network is never a secondary network this is always false. -// This cannot be true if IsSecondary() is not true. +// network is never a User Defined Network this is always false. +// This cannot be true if IsUserDefinedNetwork() is not true. func (nInfo *DefaultNetInfo) IsPrimaryNetwork() bool { return false } -// IsSecondary returns if this network is secondary -func (nInfo *DefaultNetInfo) IsSecondary() bool { +// IsUserDefinedNetwork returns if this network is secondary +func (nInfo *DefaultNetInfo) IsUserDefinedNetwork() bool { return false } @@ -610,7 +610,7 @@ func (nInfo *DefaultNetInfo) JoinSubnetV6() *net.IPNet { return cidr } -// JoinSubnets returns the secondaryNetInfo's joinsubnet values (both v4&v6) +// JoinSubnets returns the userDefinedNetInfo's joinsubnet values (both v4&v6) // used from Equals func (nInfo *DefaultNetInfo) JoinSubnets() []*net.IPNet { var defaultJoinSubnets []*net.IPNet @@ -652,12 +652,12 @@ func (nInfo *DefaultNetInfo) GetNodeManagementIP(hostSubnet *net.IPNet) *net.IPN return GetNodeManagementIfAddr(hostSubnet) } -// SecondaryNetInfo holds the network name information for secondary network if non-nil -type secondaryNetInfo struct { +// userDefinedNetInfo holds the network name information for a User Defined Network if non-nil +type userDefinedNetInfo struct 
{ mutableNetInfo netName string - // Should this secondary network be used + // Should this User Defined Network be used // as the pod's primary network? primaryNetwork bool topology string @@ -677,58 +677,58 @@ type secondaryNetInfo struct { managementIPs []net.IP } -func (nInfo *secondaryNetInfo) GetNetInfo() NetInfo { +func (nInfo *userDefinedNetInfo) GetNetInfo() NetInfo { return nInfo } // GetNetworkName returns the network name -func (nInfo *secondaryNetInfo) GetNetworkName() string { +func (nInfo *userDefinedNetInfo) GetNetworkName() string { return nInfo.netName } -// IsDefault always returns false for all secondary networks. -func (nInfo *secondaryNetInfo) IsDefault() bool { +// IsDefault always returns false for all User Defined Networks. +func (nInfo *userDefinedNetInfo) IsDefault() bool { return false } -// IsPrimaryNetwork returns if this secondary network +// IsPrimaryNetwork returns if this User Defined Network // should be used as the primaryNetwork for the pod // to achieve native network segmentation -func (nInfo *secondaryNetInfo) IsPrimaryNetwork() bool { +func (nInfo *userDefinedNetInfo) IsPrimaryNetwork() bool { return nInfo.primaryNetwork } -// IsSecondary returns if this network is secondary -func (nInfo *secondaryNetInfo) IsSecondary() bool { +// IsUserDefinedNetwork returns if this network is a User Defined Network +func (nInfo *userDefinedNetInfo) IsUserDefinedNetwork() bool { return true } // GetNetworkScopedName returns a network scoped name from the provided one // appropriate to use globally. 
-func (nInfo *secondaryNetInfo) GetNetworkScopedName(name string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedName(name string) string { return fmt.Sprintf("%s%s", nInfo.getPrefix(), name) } // RemoveNetworkScopeFromName removes the name without the network scope added // by a previous call to GetNetworkScopedName -func (nInfo *secondaryNetInfo) RemoveNetworkScopeFromName(name string) string { +func (nInfo *userDefinedNetInfo) RemoveNetworkScopeFromName(name string) string { // for the default network, names are not scoped - return strings.Trim(name, nInfo.getPrefix()) + return strings.TrimPrefix(name, nInfo.getPrefix()) } -func (nInfo *secondaryNetInfo) GetNetworkScopedK8sMgmtIntfName(nodeName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedK8sMgmtIntfName(nodeName string) string { return GetK8sMgmtIntfName(nInfo.GetNetworkScopedName(nodeName)) } -func (nInfo *secondaryNetInfo) GetNetworkScopedClusterRouterName() string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedClusterRouterName() string { return nInfo.GetNetworkScopedName(types.OVNClusterRouter) } -func (nInfo *secondaryNetInfo) GetNetworkScopedGWRouterName(nodeName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedGWRouterName(nodeName string) string { return GetGatewayRouterFromNode(nInfo.GetNetworkScopedName(nodeName)) } -func (nInfo *secondaryNetInfo) GetNetworkScopedSwitchName(nodeName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedSwitchName(nodeName string) string { // In Layer2Topology there is just one global switch if nInfo.TopologyType() == types.Layer2Topology { return nInfo.GetNetworkScopedName(types.OVNLayer2Switch) @@ -736,61 +736,61 @@ func (nInfo *secondaryNetInfo) GetNetworkScopedSwitchName(nodeName string) strin return nInfo.GetNetworkScopedName(nodeName) } -func (nInfo *secondaryNetInfo) GetNetworkScopedJoinSwitchName() string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedJoinSwitchName() string { return 
nInfo.GetNetworkScopedName(types.OVNJoinSwitch) } -func (nInfo *secondaryNetInfo) GetNetworkScopedExtSwitchName(nodeName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedExtSwitchName(nodeName string) string { return GetExtSwitchFromNode(nInfo.GetNetworkScopedName(nodeName)) } -func (nInfo *secondaryNetInfo) GetNetworkScopedPatchPortName(bridgeID, nodeName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedPatchPortName(bridgeID, nodeName string) string { return GetPatchPortName(bridgeID, nInfo.GetNetworkScopedName(nodeName)) } -func (nInfo *secondaryNetInfo) GetNetworkScopedExtPortName(bridgeID, nodeName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedExtPortName(bridgeID, nodeName string) string { return GetExtPortName(bridgeID, nInfo.GetNetworkScopedName(nodeName)) } -func (nInfo *secondaryNetInfo) GetNetworkScopedLoadBalancerName(lbName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedLoadBalancerName(lbName string) string { return nInfo.GetNetworkScopedName(lbName) } -func (nInfo *secondaryNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string { +func (nInfo *userDefinedNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string { return nInfo.GetNetworkScopedName(lbGroupName) } // getPrefix returns if the logical entities prefix for this network -func (nInfo *secondaryNetInfo) getPrefix() string { - return GetSecondaryNetworkPrefix(nInfo.netName) +func (nInfo *userDefinedNetInfo) getPrefix() string { + return GetUserDefinedNetworkPrefix(nInfo.netName) } // TopologyType returns the topology type -func (nInfo *secondaryNetInfo) TopologyType() string { +func (nInfo *userDefinedNetInfo) TopologyType() string { return nInfo.topology } // MTU returns the layer3NetConfInfo's MTU value -func (nInfo *secondaryNetInfo) MTU() int { +func (nInfo *userDefinedNetInfo) MTU() int { return nInfo.mtu } // Vlan returns the Vlan value -func (nInfo *secondaryNetInfo) Vlan() 
uint { +func (nInfo *userDefinedNetInfo) Vlan() uint { return nInfo.vlan } // AllowsPersistentIPs returns the defaultNetConfInfo's AllowPersistentIPs value -func (nInfo *secondaryNetInfo) AllowsPersistentIPs() bool { +func (nInfo *userDefinedNetInfo) AllowsPersistentIPs() bool { return nInfo.allowPersistentIPs } // PhysicalNetworkName returns the user provided physical network name value -func (nInfo *secondaryNetInfo) PhysicalNetworkName() string { +func (nInfo *userDefinedNetInfo) PhysicalNetworkName() string { return nInfo.physicalNetworkName } -func (nInfo *secondaryNetInfo) GetNodeGatewayIP(hostSubnet *net.IPNet) *net.IPNet { +func (nInfo *userDefinedNetInfo) GetNodeGatewayIP(hostSubnet *net.IPNet) *net.IPNet { if IsPreconfiguredUDNAddressesEnabled() && nInfo.TopologyType() == types.Layer2Topology && nInfo.IsPrimaryNetwork() { isIPV6 := knet.IsIPv6CIDR(hostSubnet) gwIP, _ := MatchFirstIPFamily(isIPV6, nInfo.defaultGatewayIPs) @@ -802,7 +802,7 @@ func (nInfo *secondaryNetInfo) GetNodeGatewayIP(hostSubnet *net.IPNet) *net.IPNe return GetNodeGatewayIfAddr(hostSubnet) } -func (nInfo *secondaryNetInfo) GetNodeManagementIP(hostSubnet *net.IPNet) *net.IPNet { +func (nInfo *userDefinedNetInfo) GetNodeManagementIP(hostSubnet *net.IPNet) *net.IPNet { if IsPreconfiguredUDNAddressesEnabled() && nInfo.TopologyType() == types.Layer2Topology && nInfo.IsPrimaryNetwork() { isIPV6 := knet.IsIPv6CIDR(hostSubnet) mgmtIP, _ := MatchFirstIPFamily(isIPV6, nInfo.managementIPs) @@ -815,56 +815,56 @@ func (nInfo *secondaryNetInfo) GetNodeManagementIP(hostSubnet *net.IPNet) *net.I } // IPMode returns the ipv4/ipv6 mode -func (nInfo *secondaryNetInfo) IPMode() (bool, bool) { +func (nInfo *userDefinedNetInfo) IPMode() (bool, bool) { return nInfo.ipv4mode, nInfo.ipv6mode } // Subnets returns the Subnets value -func (nInfo *secondaryNetInfo) Subnets() []config.CIDRNetworkEntry { +func (nInfo *userDefinedNetInfo) Subnets() []config.CIDRNetworkEntry { return nInfo.subnets } // ExcludeSubnets 
returns the ExcludeSubnets value -func (nInfo *secondaryNetInfo) ExcludeSubnets() []*net.IPNet { +func (nInfo *userDefinedNetInfo) ExcludeSubnets() []*net.IPNet { return nInfo.excludeSubnets } // ReservedSubnets returns the ReservedSubnets value -func (nInfo *secondaryNetInfo) ReservedSubnets() []*net.IPNet { +func (nInfo *userDefinedNetInfo) ReservedSubnets() []*net.IPNet { return nInfo.reservedSubnets } // InfrastructureSubnets returns the InfrastructureSubnets value -func (nInfo *secondaryNetInfo) InfrastructureSubnets() []*net.IPNet { +func (nInfo *userDefinedNetInfo) InfrastructureSubnets() []*net.IPNet { return nInfo.infrastructureSubnets } // JoinSubnetV4 returns the defaultNetConfInfo's JoinSubnetV4 value // call when ipv4mode=true -func (nInfo *secondaryNetInfo) JoinSubnetV4() *net.IPNet { +func (nInfo *userDefinedNetInfo) JoinSubnetV4() *net.IPNet { if len(nInfo.joinSubnets) == 0 { return nil // localnet topology } return nInfo.joinSubnets[0] } -// JoinSubnetV6 returns the secondaryNetInfo's JoinSubnetV6 value +// JoinSubnetV6 returns the userDefinedNetInfo's JoinSubnetV6 value // call when ipv6mode=true -func (nInfo *secondaryNetInfo) JoinSubnetV6() *net.IPNet { +func (nInfo *userDefinedNetInfo) JoinSubnetV6() *net.IPNet { if len(nInfo.joinSubnets) <= 1 { return nil // localnet topology } return nInfo.joinSubnets[1] } -// JoinSubnets returns the secondaryNetInfo's joinsubnet values (both v4&v6) +// JoinSubnets returns the userDefinedNetInfo's joinsubnet values (both v4&v6) // used from Equals (since localnet doesn't have joinsubnets to compare nil v/s nil // we need this util) -func (nInfo *secondaryNetInfo) JoinSubnets() []*net.IPNet { +func (nInfo *userDefinedNetInfo) JoinSubnets() []*net.IPNet { return nInfo.joinSubnets } -func (nInfo *secondaryNetInfo) canReconcile(other NetInfo) bool { +func (nInfo *userDefinedNetInfo) canReconcile(other NetInfo) bool { if (nInfo == nil) != (other == nil) { return false } @@ -916,9 +916,9 @@ func (nInfo 
*secondaryNetInfo) canReconcile(other NetInfo) bool { return cmp.Equal(nInfo.joinSubnets, other.JoinSubnets(), cmpopts.SortSlices(lessIPNet)) } -func (nInfo *secondaryNetInfo) copy() *secondaryNetInfo { +func (nInfo *userDefinedNetInfo) copy() *userDefinedNetInfo { // everything here is immutable - c := &secondaryNetInfo{ + c := &userDefinedNetInfo{ netName: nInfo.netName, primaryNetwork: nInfo.primaryNetwork, topology: nInfo.topology, @@ -951,7 +951,7 @@ func newLayer3NetConfInfo(netconf *ovncnitypes.NetConf) (MutableNetInfo, error) if err != nil { return nil, err } - ni := &secondaryNetInfo{ + ni := &userDefinedNetInfo{ netName: netconf.Name, primaryNetwork: netconf.Role == types.NetworkRolePrimary, topology: types.Layer3Topology, @@ -1014,7 +1014,7 @@ func newLayer2NetConfInfo(netconf *ovncnitypes.NetConf) (MutableNetInfo, error) } } - ni := &secondaryNetInfo{ + ni := &userDefinedNetInfo{ netName: netconf.Name, primaryNetwork: netconf.Role == types.NetworkRolePrimary, topology: types.Layer2Topology, @@ -1051,7 +1051,7 @@ func newLocalnetNetConfInfo(netconf *ovncnitypes.NetConf) (MutableNetInfo, error return nil, err } - ni := &secondaryNetInfo{ + ni := &userDefinedNetInfo{ netName: netconf.Name, topology: types.LocalnetTopology, subnets: subnets, @@ -1175,12 +1175,12 @@ func GetNADName(namespace, name string) string { return fmt.Sprintf("%s/%s", namespace, name) } -// GetSecondaryNetworkPrefix gets the string used as prefix of the logical entities -// of the secondary network of the given network name, in the form of _. +// GetUserDefinedNetworkPrefix gets the string used as prefix of the logical entities +// of the User Defined Network of the given network name, in the form of _. // // Note that for port_group and address_set, it does not allow the '-' character, // which will be replaced with ".". Also replace "/" in the nadName with "." 
-func GetSecondaryNetworkPrefix(netName string) string { +func GetUserDefinedNetworkPrefix(netName string) string { name := strings.ReplaceAll(netName, "-", ".") name = strings.ReplaceAll(name, "/", ".") return name + "_" @@ -1210,7 +1210,7 @@ func newNetInfo(netconf *ovncnitypes.NetConf) (MutableNetInfo, error) { if err != nil { return nil, err } - if ni.IsPrimaryNetwork() && ni.IsSecondary() { + if ni.IsPrimaryNetwork() && ni.IsUserDefinedNetwork() { ipv4Mode, ipv6Mode := ni.IPMode() if ipv4Mode && !config.IPv4Mode { return nil, fmt.Errorf("network %s is attempting to use ipv4 subnets but the cluster does not support ipv4", ni.GetNetworkName()) @@ -1234,7 +1234,7 @@ func GetAnnotatedNetworkName(netattachdef *nettypes.NetworkAttachmentDefinition) return netattachdef.Annotations[types.OvnNetworkNameAnnotation] } -// ParseNADInfo parses config in NAD spec and return a NetAttachDefInfo object for secondary networks +// ParseNADInfo parses config in NAD spec and return a NetAttachDefInfo object for User Defined Networks func ParseNADInfo(nad *nettypes.NetworkAttachmentDefinition) (NetInfo, error) { netconf, err := ParseNetConf(nad) if err != nil { @@ -1267,7 +1267,7 @@ func ParseNADInfo(nad *nettypes.NetworkAttachmentDefinition) (NetInfo, error) { return n, nil } -// ParseNetConf parses config in NAD spec for secondary networks +// ParseNetConf parses config in NAD spec for User Defined Networks func ParseNetConf(netattachdef *nettypes.NetworkAttachmentDefinition) (*ovncnitypes.NetConf, error) { netconf, err := config.ParseNetConf([]byte(netattachdef.Spec.Config)) if err != nil { @@ -1365,6 +1365,14 @@ func subnetOverlapCheck(netconf *ovncnitypes.NetConf) error { allSubnets.Append(config.ConfigSubnetMasquerade, v4MasqueradeCIDR) allSubnets.Append(config.ConfigSubnetMasquerade, v6MasqueradeCIDR) + if netconf.Topology == types.Layer3Topology { + _, v4TransitCIDR, _ := net.ParseCIDR(config.ClusterManager.V4TransitSwitchSubnet) + _, v6TransitCIDR, _ := 
net.ParseCIDR(config.ClusterManager.V6TransitSwitchSubnet) + + allSubnets.Append(config.ConfigSubnetTransit, v4TransitCIDR) + allSubnets.Append(config.ConfigSubnetTransit, v6TransitCIDR) + } + ni, err := NewNetInfo(netconf) if err != nil { return fmt.Errorf("error while parsing subnets: %v", err) @@ -1408,7 +1416,7 @@ func GetPodNADToNetworkMapping(pod *corev1.Pod, nInfo NetInfo) (bool, map[string networkSelections := map[string]*nettypes.NetworkSelectionElement{} podDesc := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name) - if !nInfo.IsSecondary() { + if !nInfo.IsUserDefinedNetwork() { network, err := GetK8sPodDefaultNetworkSelection(pod) if err != nil { // multus won't add this Pod if this fails, should never happen @@ -1561,7 +1569,7 @@ func AllowsPersistentIPs(netInfo NetInfo) bool { case netInfo.IsPrimaryNetwork(): return netInfo.TopologyType() == types.Layer2Topology && netInfo.AllowsPersistentIPs() - case netInfo.IsSecondary(): + case netInfo.IsUserDefinedNetwork(): return (netInfo.TopologyType() == types.Layer2Topology || netInfo.TopologyType() == types.LocalnetTopology) && netInfo.AllowsPersistentIPs() @@ -1702,8 +1710,8 @@ func GetNetworkRole(controllerNetInfo NetInfo, getActiveNetworkForNamespace func // (C)UDN network name generation functions must ensure the absence of name conflicts between all (C)UDNs. // We use underscore as a separator as it is not allowed in k8s namespaces and names. -// Network name is then used by GetSecondaryNetworkPrefix function to generate db object names. -// GetSecondaryNetworkPrefix replaces some characters in the network name to ensure correct db object names, +// Network name is then used by GetUserDefinedNetworkPrefix function to generate db object names. +// GetUserDefinedNetworkPrefix replaces some characters in the network name to ensure correct db object names, // so the network name must be also unique after these replacements. 
func GenerateUDNNetworkName(namespace, name string) string { diff --git a/go-controller/pkg/util/multi_network_test.go b/go-controller/pkg/util/multi_network_test.go index 24746599de..5fee0f9c42 100644 --- a/go-controller/pkg/util/multi_network_test.go +++ b/go-controller/pkg/util/multi_network_test.go @@ -1364,6 +1364,8 @@ func TestSubnetOverlapCheck(t *testing.T) { config.Gateway.V6MasqueradeSubnet = "fd69::/125" config.Gateway.V4JoinSubnet = "100.64.0.0/16" config.Gateway.V6JoinSubnet = "fd98::/64" + config.ClusterManager.V4TransitSwitchSubnet = "100.88.0.0/16" + config.ClusterManager.V6TransitSwitchSubnet = "fd97::/64" type testConfig struct { desc string inputNetAttachDefConfigSpec string @@ -1371,6 +1373,23 @@ func TestSubnetOverlapCheck(t *testing.T) { } tests := []testConfig{ + { + desc: "return error when IPv4 POD subnet in net-attach-def overlaps with transit switch subnet", + inputNetAttachDefConfigSpec: ` + { + "name": "tenantred", + "type": "ovn-k8s-cni-overlay", + "topology": "layer3", + "subnets": "100.88.0.0/17", + "joinSubnet": "100.65.0.0/24", + "primaryNetwork": true, + "netAttachDefName": "ns1/nad1" + } + `, + expectedError: config.NewSubnetOverlapError( + config.ConfigSubnet{SubnetType: config.UserDefinedSubnets, Subnet: MustParseCIDR("100.88.0.0/17")}, + config.ConfigSubnet{SubnetType: config.ConfigSubnetTransit, Subnet: MustParseCIDR(config.ClusterManager.V4TransitSwitchSubnet)}), + }, { desc: "return error when IPv4 POD subnet in net-attach-def overlaps other subnets", inputNetAttachDefConfigSpec: ` @@ -1582,8 +1601,8 @@ func TestAreNetworksCompatible(t *testing.T) { }{ { desc: "physical network name update", - aNetwork: &secondaryNetInfo{physicalNetworkName: "A"}, - anotherNetwork: &secondaryNetInfo{physicalNetworkName: "B"}, + aNetwork: &userDefinedNetInfo{physicalNetworkName: "A"}, + anotherNetwork: &userDefinedNetInfo{physicalNetworkName: "B"}, expectedResult: false, expectationDescription: "we should reconcile on physical network name 
updates", }, diff --git a/go-controller/pkg/util/pod_annotation.go b/go-controller/pkg/util/pod_annotation.go index ba565571bc..df89537b30 100644 --- a/go-controller/pkg/util/pod_annotation.go +++ b/go-controller/pkg/util/pod_annotation.go @@ -338,7 +338,7 @@ func GetPodCIDRsWithFullMask(pod *corev1.Pod, nInfo NetInfo) ([]*net.IPNet, erro // and then falling back to the Pod Status IPs. This function is intended to // also return IPs for HostNetwork and other non-OVN-IPAM-ed pods. func GetPodIPsOfNetwork(pod *corev1.Pod, nInfo NetInfo) ([]net.IP, error) { - if nInfo.IsSecondary() { + if nInfo.IsUserDefinedNetwork() { return SecondaryNetworkPodIPs(pod, nInfo) } return DefaultNetworkPodIPs(pod) diff --git a/go-controller/pkg/util/util.go b/go-controller/pkg/util/util.go index 4455de04c9..fda62d3fcd 100644 --- a/go-controller/pkg/util/util.go +++ b/go-controller/pkg/util/util.go @@ -416,7 +416,7 @@ func GetUserDefinedNetworkRole(isPrimary bool) string { // when on the default cluster network, for backward compatibility. 
func GenerateExternalIDsForSwitchOrRouter(netInfo NetInfo) map[string]string { externalIDs := make(map[string]string) - if netInfo.IsSecondary() { + if netInfo.IsUserDefinedNetwork() { externalIDs[types.NetworkExternalID] = netInfo.GetNetworkName() externalIDs[types.NetworkRoleExternalID] = GetUserDefinedNetworkRole(netInfo.IsPrimaryNetwork()) externalIDs[types.TopologyExternalID] = netInfo.TopologyType() @@ -424,8 +424,8 @@ func GenerateExternalIDsForSwitchOrRouter(netInfo NetInfo) map[string]string { return externalIDs } -func GetSecondaryNetworkLogicalPortName(podNamespace, podName, nadName string) string { - return GetSecondaryNetworkPrefix(nadName) + composePortName(podNamespace, podName) +func GetUserDefinedNetworkLogicalPortName(podNamespace, podName, nadName string) string { + return GetUserDefinedNetworkPrefix(nadName) + composePortName(podNamespace, podName) } func GetLogicalPortName(podNamespace, podName string) string { @@ -436,8 +436,8 @@ func GetNamespacePodFromCDNPortName(portName string) (string, string) { return decomposePortName(portName) } -func GetSecondaryNetworkIfaceId(podNamespace, podName, nadName string) string { - return GetSecondaryNetworkPrefix(nadName) + composePortName(podNamespace, podName) +func GetUDNIfaceId(podNamespace, podName, nadName string) string { + return GetUserDefinedNetworkPrefix(nadName) + composePortName(podNamespace, podName) } func GetIfaceId(podNamespace, podName string) string { diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.gitignore b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.gitignore new file mode 100644 index 0000000000..860dc4918f --- /dev/null +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.gitignore @@ -0,0 +1,31 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +*.cover +*.lcov + 
+# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +.vscode +*.swp +*.swo +*~ + +# Folders +bin +testbin +build diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.golangci.yml b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.golangci.yml index 64dbb3614d..11e1256043 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.golangci.yml +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/.golangci.yml @@ -1,116 +1,62 @@ +version: "2" run: - timeout: 10m - - # If set we pass it to "go list -mod={option}". From "go help modules": - # If invoked with -mod=readonly, the go command is disallowed from the implicit - # automatic updating of go.mod described above. Instead, it fails when any changes - # to go.mod are needed. This setting is most useful to check that go.mod does - # not need updates, such as in a continuous integration and testing system. - # If invoked with -mod=vendor, the go command assumes that the vendor - # directory holds the correct copies of dependencies and ignores - # the dependency descriptions in go.mod. - # - # Allowed values: readonly|vendor|mod - # By default, it isn't set. 
- modules-download-mode: readonly - tests: false - -linters-settings: - dupl: - threshold: 150 - funlen: - lines: 100 - statements: 50 - goconst: - min-len: 2 - min-occurrences: 2 - gocritic: - enabled-tags: - - diagnostic - - experimental - - opinionated - - performance - - style - disabled-checks: - - dupImport # https://github.com/go-critic/go-critic/issues/845 - - ifElseChain - - octalLiteral - - whyNoLint - - wrapperFunc - - unnamedResult - gocognit: - min-complexity: 30 - goimports: - local-prefixes: github.com/k8snetworkplumbingwg/sriovnet - golint: - min-confidence: 0 - gomnd: - settings: - mnd: - # don't include the "operation" and "assign" - checks: argument,case,condition,return - ignored-numbers: "1,2,10,32" - govet: - check-shadowing: true - settings: - printf: - funcs: - - (github.com/rs/zerolog/zerolog.Event).Msgf - lll: - line-length: 120 - misspell: - locale: US - ignore-words: - - flavour - - flavours - prealloc: - # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # True by default. - simple: true - range-loops: true # Report preallocation suggestions on range loops, true by default - for-loops: false # Report preallocation suggestions on for loops, false by default - + allow-parallel-runners: true linters: - # please, do not use `enable-all`: it's deprecated and will be removed soon. 
- # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint - disable-all: true + default: none enable: - - bodyclose - - depguard - - dogsled + - copyloopvar - dupl - errcheck - - funlen - - gochecknoinits + - ginkgolinter - goconst - - gocritic - - gocognit - - gofmt - - goimports - - gomnd - - goprintffuncname - - gosec - - gosimple + - gocyclo - govet + - gocritic + - importas - ineffassign - - lll - misspell - nakedret - prealloc - - revive - - rowserrcheck - - exportloopref - staticcheck - - stylecheck - - typecheck - unconvert - unparam - unused - - whitespace + settings: + ginkgolinter: + forbid-focus-container: true + misspell: + locale: US + ignore-rules: + - flavour + staticcheck: + checks: + - all + - -ST1000 + - -ST1003 + - -QF1008 + dot-import-whitelist: + - github.com/onsi/ginkgo/v2 + - github.com/onsi/gomega + exclusions: + generated: lax + rules: + - linters: + - dupl + - goconst + path: _test\.go issues: - # Excluding configuration per-path, per-linter, per-text and per-source - exclude-rules: - - text: "Magic number: 1" - linters: - - gomnd + max-issues-per-linter: 0 + max-same-issues: 0 +formatters: + enable: + - gci + - gofmt + - goimports + settings: + gci: + sections: + - standard + - default + - prefix(github.com/k8snetworkplumbingwg/sriovnet) + custom-order: true diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/Makefile b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/Makefile index 180a8a809d..13ed306c22 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/Makefile +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/Makefile @@ -11,7 +11,7 @@ GCOV2LCOV := $(BIN_DIR)/gcov2lcov # golangci-lint version should be updated periodically # we keep it fixed to avoid it from unexpectedly failing on the project # in case of a version bump -GOLANGCI_LINT_VER := v1.49.0 +GOLANGCI_LINT_VER := v2.3.0 Q = $(if $(filter 1,$V),,@) @@ 
-32,7 +32,7 @@ lint: | $(GOLANGCI_LINT) ; $(info running golangci-lint...) @ ## Run lint tests .PHONY: test tests test: ; $(info running unit tests...) ## Run unit tests - $Q go test ./... + $Q go test -race ./... tests: test lint ; ## Run all tests @@ -41,11 +41,11 @@ COVERAGE_MODE = count test-coverage-tools: $(GCOV2LCOV) test-coverage: | test-coverage-tools; $(info running coverage tests...) @ ## Run coverage tests $Q go test -covermode=$(COVERAGE_MODE) -coverprofile=sriovnet.cover ./... - $Q $(GCOV2LCOV) -infile sriovnet.cover -outfile sriovnet.info + $Q $(GCOV2LCOV) -infile sriovnet.cover -outfile sriovnet.lcov # Tools $(GOLANGCI_LINT): | $(BIN_DIR) ; $(info building golangci-lint...) - $Q GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VER) + $Q GOBIN=$(BIN_DIR) go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VER) $(GCOV2LCOV): | $(BIN_DIR) ; $(info building gocov2lcov...) $Q GOBIN=$(BIN_DIR) go install github.com/jandelgado/gcov2lcov@v1.0.5 @@ -55,7 +55,7 @@ $(GCOV2LCOV): | $(BIN_DIR) ; $(info building gocov2lcov...) clean: ; $(info Cleaning...) @ ## Cleanup everything @rm -rf $(BIN_DIR) @rm sriovnet.cover - @rm sriovnet.info + @rm sriovnet.lcov .PHONY: help help: ; @ ## Show this message diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/errors.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/errors.go new file mode 100644 index 0000000000..aa6ff7ebdc --- /dev/null +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/errors.go @@ -0,0 +1,25 @@ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sriovnet + +import ( + "errors" +) + +var ( + ErrDeviceNotFound = errors.New("device not found") +) diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/file_access.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/file_access.go index b0fe653b3b..f2418c1455 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/file_access.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/file_access.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 NVIDIA CORPORATION & + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + //nolint:gomnd package sriovnet @@ -109,7 +125,9 @@ func lsFilesWithPrefix(dir, filePrefix string, ignoreDir bool) ([]string, error) if err != nil { return nil, err } - defer f.Close() + defer func() { + _ = f.Close() + }() fileInfos, err := f.Readdir(-1) if err != nil { return nil, err diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/mofed_ib_helper.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/mofed_ib_helper.go index 0e99e41913..6bf9aad8c8 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/mofed_ib_helper.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/mofed_ib_helper.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package sriovnet import ( diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/filesystem/fakefs.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/filesystem/fakefs.go index 05e6a4ca91..1f43cd8fae 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/filesystem/fakefs.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/filesystem/fakefs.go @@ -37,7 +37,7 @@ func NewFakeFs(fakeFsRoot string) (Filesystem, func(), error) { return &FakeFs{a: afero.Afero{Fs: afero.NewBasePathFs(afero.NewOsFs(), fakeFsRoot)}}, func() { - os.RemoveAll(fakeFsRoot) + _ = os.RemoveAll(fakeFsRoot) }, nil } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet.go index 0a961a7353..ecee3f5a6f 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package sriovnet import ( @@ -285,9 +301,11 @@ func setDefaultHwAddr(handle *PfNetdevHandle, vf *VfObj) error { var err error ethAttr := handle.pfLinkHandle.Attrs() - if ethAttr.EncapType == etherEncapType { + + switch ethAttr.EncapType { + case etherEncapType: err = SetVfDefaultMacAddress(handle, vf) - } else if ethAttr.EncapType == ibEncapType { + case ibEncapType: err = SetVfDefaultGUID(handle, vf) } return err @@ -387,7 +405,7 @@ func AllocateVfByMacAddress(handle *PfNetdevHandle, vfMacAddress string) (*VfObj handle.PfNetdevName, vfMacAddress) } -func FreeVf(handle *PfNetdevHandle, vf *VfObj) { +func FreeVf(_ *PfNetdevHandle, vf *VfObj) { vf.Allocated = false log.Printf("Free vf = %v\n", *vf) } @@ -433,7 +451,7 @@ func GetVfIndexByPciAddress(vfPciAddress string) (int, error) { return -1, fmt.Errorf("vf index for %s not found", vfPciAddress) } -// gets the PF index that's associated with a VF PCI address (e.g '0000:03:00.4') +// GetPfIndexByVfPciAddress gets the PF index that's associated with a VF PCI address (e.g '0000:03:00.4') func GetPfIndexByVfPciAddress(vfPciAddress string) (int, error) { const pciParts = 4 pfPciAddress, err := GetPfPciFromVfPci(vfPciAddress) @@ -504,3 +522,28 @@ func GetPciFromNetDevice(name string) (string, error) { } return base, nil } + +// GetPKeyByIndexFromPci returns the PKey stored under given index for the IB PCI device +func GetPKeyByIndexFromPci(pciAddress string, index int) (string, error) { + pciDir := filepath.Join(PciSysDir, pciAddress, "infiniband") + dirEntries, err := utilfs.Fs.ReadDir(pciDir) + if err != nil { + return "", fmt.Errorf("failed to read infiniband directory: %v", err) + } + if len(dirEntries) == 0 { + return "", fmt.Errorf("infiniband directory is empty for device: %s", pciAddress) + } + + indexFilePath := filepath.Join(pciDir, dirEntries[0].Name(), "ports", "1", "pkeys", strconv.Itoa(index)) + pKeyBytes, err := utilfs.Fs.ReadFile(indexFilePath) + if err != nil { + return "", fmt.Errorf("failed to 
read PKey file: %v", err) + } + + return strings.TrimSpace(string(pKeyBytes)), nil +} + +// GetDefaultPKeyFromPci returns the index0 PKey for the IB PCI device +func GetDefaultPKeyFromPci(pciAddress string) (string, error) { + return GetPKeyByIndexFromPci(pciAddress, 0) +} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_aux.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_aux.go index a60061b3ee..6a9ac08d9b 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_aux.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_aux.go @@ -1,21 +1,18 @@ -/*---------------------------------------------------- - * - * 2022 NVIDIA CORPORATION & AFFILIATES - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - *---------------------------------------------------- - */ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ package sriovnet @@ -28,7 +25,11 @@ import ( utilfs "github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/filesystem" ) -// GetNetDeviceFromAux gets auxiliary device name (e.g 'mlx5_core.sf.2') and +const ( + u32Mask uint32 = 0xffffffff +) + +// GetNetDevicesFromAux gets auxiliary device name (e.g 'mlx5_core.sf.2') and // returns the correlate netdevice func GetNetDevicesFromAux(auxDev string) ([]string, error) { auxDir := filepath.Join(AuxSysDir, auxDev, "net") @@ -103,9 +104,39 @@ func GetAuxNetDevicesFromPci(pciAddr string) ([]string, error) { auxDevs := make([]string, 0) for _, file := range files { + if !file.IsDir() { + // auxiliary devices appear as directory here. + continue + } if auxiliaryDeviceRe.MatchString(file.Name()) { auxDevs = append(auxDevs, file.Name()) } } return auxDevs, nil } + +// GetAuxSFDevByPciAndSFIndex returns auxiliary SF device name which is associated with the given parent PCI address +// and SF index. returns error if an error occurred. returns ErrDeviceNotFound error if device is not found. 
+func GetAuxSFDevByPciAndSFIndex(pciAddress string, sfIndex uint32) (string, error) { + devs, err := GetAuxNetDevicesFromPci(pciAddress) + if err != nil { + return "", err + } + + for _, dev := range devs { + // skip non sf devices + if !strings.Contains(dev, ".sf.") { + continue + } + + idx, err := GetSfIndexByAuxDev(dev) + if err != nil || idx < 0 { + continue + } + + if uint32(idx)&u32Mask == sfIndex { + return dev, nil + } + } + return "", ErrDeviceNotFound +} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_helper.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_helper.go index 46ab4fb7ef..bb96487edc 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_helper.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_helper.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package sriovnet import ( diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_switchdev.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_switchdev.go index 5ccf3fadc5..64687e146f 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_switchdev.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/sriovnet_switchdev.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package sriovnet import ( @@ -39,19 +55,22 @@ const ( // Regex that matches on the physical/upling port name var physPortRepRegex = regexp.MustCompile(`^p(\d+)$`) -// Regex that matches on PF representor port name. These ports exists on DPUs. +// Regex that matches on PF representor port name. These ports exists on DPUs and represents ports on Host. var pfPortRepRegex = regexp.MustCompile(`^(?:c\d+)?pf(\d+)$`) -// Regex that matches on VF representor port name -var vfPortRepRegex = regexp.MustCompile(`^(?:c\d+)?pf(\d+)vf(\d+)$`) +// Regex that matches on VF representor port name for a local VF. +var vfPortRepRegex = regexp.MustCompile(`^pf(\d+)vf(\d+)$`) + +// Regex that matches on VF representor port name with controller index. These ports exists on DPUs. and represent VFs on Host. 
+var vfPortRepRegexWithControllerIndex = regexp.MustCompile(`^c\d+pf(\d+)vf(\d+)$`) // Regex that matches on SF representor port name -var sfPortRepRegex = regexp.MustCompile(`^(?:c\d+)?pf(\d+)sf(\d+)$`) +var sfPortRepRegex = regexp.MustCompile(`^pf(\d+)sf(\d+)$`) -func parseIndexFromPhysPortName(portName string, regex *regexp.Regexp) (pfRepIndex, vfRepIndex int, err error) { - pfRepIndex = -1 - vfRepIndex = -1 +// Regex that matches on SF representor port name with controller index. These ports exists on DPUs. and represent SFs on Host. +var sfPortRepRegexWithControllerIndex = regexp.MustCompile(`^c\d+pf(\d+)sf(\d+)$`) +func parseIndexFromPhysPortName(portName string, regex *regexp.Regexp) (pfRepIndex, vfRepIndex int, err error) { matches := regex.FindStringSubmatch(portName) //nolint:gomnd if len(matches) != 3 { @@ -65,22 +84,14 @@ func parseIndexFromPhysPortName(portName string, regex *regexp.Regexp) (pfRepInd return pfRepIndex, vfRepIndex, err } -func parsePortName(physPortName string) (pfRepIndex, vfRepIndex int, err error) { - // old kernel syntax of phys_port_name is vf index - physPortName = strings.TrimSpace(physPortName) - physPortNameInt, err := strconv.Atoi(physPortName) - if err == nil { - vfRepIndex = physPortNameInt - } else { - pfRepIndex, vfRepIndex, err = parseIndexFromPhysPortName(physPortName, vfPortRepRegex) +func parseVFPortName(physPortName string) (pfRepIndex, vfRepIndex int, err error) { + for _, regex := range []*regexp.Regexp{vfPortRepRegex, vfPortRepRegexWithControllerIndex} { + if regex.MatchString(physPortName) { + return parseIndexFromPhysPortName(physPortName, regex) + } } - return pfRepIndex, vfRepIndex, err -} -func sfIndexFromPortName(physPortName string) (int, error) { - //nolint:gomnd - _, sfRepIndex, err := parseIndexFromPhysPortName(physPortName, sfPortRepRegex) - return sfRepIndex, err + return pfRepIndex, vfRepIndex, fmt.Errorf("failed to parse vf port name %s", physPortName) } func isSwitchdev(netdevice string) bool { @@ 
-110,12 +121,14 @@ func GetUplinkRepresentor(pciAddress string) (string, error) { } for _, device := range devices { if isSwitchdev(device.Name()) { - // Try to get the phys port name, if not exists then fallback to check without it + devicePhysPortName, err := getNetDevPhysPortName(device.Name()) + if err != nil { + continue + } + // phys_port_name should be in formant p e.g p0,p1,p2 ...etc. - if devicePhysPortName, err := getNetDevPhysPortName(device.Name()); err == nil { - if !physPortRepRegex.MatchString(devicePhysPortName) { - continue - } + if !physPortRepRegex.MatchString(devicePhysPortName) { + continue } return device.Name(), nil @@ -124,6 +137,7 @@ func GetUplinkRepresentor(pciAddress string) (string, error) { return "", fmt.Errorf("uplink for %s not found", pciAddress) } +// GetVfRepresentor returns the VF representor netdev name for a given uplink netdev and vfIndex. func GetVfRepresentor(uplink string, vfIndex int) (string, error) { swIDFile := filepath.Join(NetSysDir, uplink, netdevPhysSwitchID) physSwitchID, err := utilfs.Fs.ReadFile(swIDFile) @@ -131,6 +145,17 @@ func GetVfRepresentor(uplink string, vfIndex int) (string, error) { return "", fmt.Errorf("cant get uplink %s switch id", uplink) } + // get uplink pci address and pci function number + pfPCIAddress, err := getPCIFromDeviceName(uplink) + if err != nil { + return "", fmt.Errorf("failed to get pci address for uplink %s: %v", uplink, err) + } + PCIFuncAddress, err := strconv.Atoi(string((pfPCIAddress[len(pfPCIAddress)-1]))) + if err != nil { + return "", fmt.Errorf("failed to get pci function number for uplink %s, pfPCIAddress %s: %w", + uplink, pfPCIAddress, err) + } + pfSubsystemPath := filepath.Join(NetSysDir, uplink, "subsystem") devices, err := utilfs.Fs.ReadDir(pfSubsystemPath) if err != nil { @@ -143,29 +168,28 @@ func GetVfRepresentor(uplink string, vfIndex int) (string, error) { if err != nil || !bytes.Equal(deviceSwID, physSwitchID) { continue } + physPortNameStr, err := 
getNetDevPhysPortName(device.Name()) if err != nil { continue } - pfRepIndex, vfRepIndex, _ := parsePortName(physPortNameStr) - if pfRepIndex != -1 { - pfPCIAddress, err := getPCIFromDeviceName(uplink) - if err != nil { - continue - } - PCIFuncAddress, err := strconv.Atoi(string((pfPCIAddress[len(pfPCIAddress)-1]))) - if pfRepIndex != PCIFuncAddress || err != nil { - continue - } + + pfRepIndex, vfRepIndex, err := parseIndexFromPhysPortName(physPortNameStr, vfPortRepRegex) + if err != nil { + continue } - // At this point we're confident we have a representor. - if vfRepIndex == vfIndex { + + // check pfRepIndex matches the uplink PF function number (e.g. 0000:03:00.0 -> 0) and + // vfRepIndex matches the vfIndex + if pfRepIndex == PCIFuncAddress && vfRepIndex == vfIndex { + // At this point we're confident we have a representor. return device.Name(), nil } } return "", fmt.Errorf("failed to find VF representor for uplink %s", uplink) } +// GetSfRepresentor returns the SF representor netdev name for a given uplink netdev and sfIndex. 
func GetSfRepresentor(uplink string, sfNum int) (string, error) { pfNetPath := filepath.Join(NetSysDir, uplink, "device", "net") devices, err := utilfs.Fs.ReadDir(pfNetPath) @@ -178,7 +202,7 @@ func GetSfRepresentor(uplink string, sfNum int) (string, error) { if err != nil { continue } - sfRepIndex, err := sfIndexFromPortName(physPortNameStr) + _, sfRepIndex, err := parseIndexFromPhysPortName(physPortNameStr, sfPortRepRegex) if err != nil { continue } @@ -248,17 +272,24 @@ func GetPortIndexFromRepresentor(repNetDev string) (int, error) { return 0, fmt.Errorf("failed to get device %s physical port name: %v", repNetDev, err) } - typeToRegex := map[PortFlavour]*regexp.Regexp{ - PORT_FLAVOUR_PCI_VF: vfPortRepRegex, - PORT_FLAVOUR_PCI_SF: sfPortRepRegex, + typeToRegex := map[PortFlavour][]*regexp.Regexp{ + PORT_FLAVOUR_PCI_VF: {vfPortRepRegex, vfPortRepRegexWithControllerIndex}, + PORT_FLAVOUR_PCI_SF: {sfPortRepRegex, sfPortRepRegexWithControllerIndex}, } - _, repIndex, err := parseIndexFromPhysPortName(physPortName, typeToRegex[flavor]) - if err != nil { - return 0, fmt.Errorf("failed to parse the physical port name of device %s: %v", repNetDev, err) + for _, regex := range typeToRegex[flavor] { + if regex.MatchString(physPortName) { + _, repIndex, err := parseIndexFromPhysPortName(physPortName, regex) + if err != nil { + return 0, fmt.Errorf("failed to parse the physical port name of device %s: %v", repNetDev, err) + } + + return repIndex, nil + } } - return repIndex, nil + return 0, fmt.Errorf("failed to get port index for representor %s. no matching regex found for phys_port_name %s", + repNetDev, physPortName) } // GetVfRepresentorDPU returns VF representor on DPU for a host VF identified by pfID and vfIndex @@ -276,25 +307,29 @@ func GetVfRepresentorDPU(pfID, vfIndex string) (string, error) { return "", fmt.Errorf("unexpected vfIndex(%s). It should be an unsigned decimal number", vfIndex) } - // map for easy search of expected VF rep port name. 
- // Note: no support for Multi-Chassis DPUs - expectedPhysPortNames := map[string]interface{}{ - fmt.Sprintf("pf%svf%s", pfID, vfIndex): nil, - fmt.Sprintf("c1pf%svf%s", pfID, vfIndex): nil, + // match port name with external controller index + // NOTE: no support for Multi-Chassis DPUs + expectedPhysPortName := fmt.Sprintf("c1pf%svf%s", pfID, vfIndex) + netdev, err := findNetdevWithPortNameCriteria(func(portName string) bool { + return portName == expectedPhysPortName + }) + + if err == nil { + return netdev, nil } - netdev, err := findNetdevWithPortNameCriteria(func(portName string) bool { - // if phys port name == pfvf or c1pfvf we have a match - if _, ok := expectedPhysPortNames[portName]; ok { - return true - } - return false + // match port name without controller index (legacy) + // NOTE: here we assume the only VF representors on the DPU are for host VFs (and not for local VFs). + expectedPhysPortName = fmt.Sprintf("pf%svf%s", pfID, vfIndex) + netdev, err = findNetdevWithPortNameCriteria(func(portName string) bool { + return portName == expectedPhysPortName }) - if err != nil { - return "", fmt.Errorf("vf representor for pfID:%s, vfIndex:%s not found", pfID, vfIndex) + if err == nil { + return netdev, nil } - return netdev, nil + + return "", fmt.Errorf("vf representor for pfID: %s, vfIndex: %s not found", pfID, vfIndex) } // GetSfRepresentorDPU returns SF representor on DPU for a host SF identified by pfID and sfIndex @@ -309,25 +344,49 @@ func GetSfRepresentorDPU(pfID, sfIndex string) (string, error) { return "", fmt.Errorf("unexpected sfIndex(%s). It should be an unsigned decimal number", sfIndex) } - // map for easy search of expected VF rep port name. 
- // Note: no support for Multi-Chassis DPUs - expectedPhysPortNames := map[string]interface{}{ - fmt.Sprintf("pf%ssf%s", pfID, sfIndex): nil, - fmt.Sprintf("c1pf%ssf%s", pfID, sfIndex): nil, + // match port name with external controller index + // NOTE: no support for Multi-Chassis DPUs + expectedPhysPortName := fmt.Sprintf("c1pf%ssf%s", pfID, sfIndex) + netdev, err := findNetdevWithPortNameCriteria(func(portName string) bool { + return portName == expectedPhysPortName + }) + + if err == nil { + return netdev, nil } + return "", fmt.Errorf("sf representor for pfID: %s, sfIndex: %s not found", pfID, sfIndex) +} + +// GetPfRepresentorDPU returns PF representor on DPU for a host PF identified by its ID. +func GetPfRepresentorDPU(pfID string) (string, error) { + // pfID should be 0 or 1 + if pfID != "0" && pfID != "1" { + return "", fmt.Errorf("unexpected pfID(%s). It should be 0 or 1", pfID) + } + + // match port name with external controller index + // NOTE: no support for Multi-Chassis DPUs + expectedPhysPortName := fmt.Sprintf("c1pf%s", pfID) netdev, err := findNetdevWithPortNameCriteria(func(portName string) bool { - // if phys port name == pfsf or c1pfsf we have a match - if _, ok := expectedPhysPortNames[portName]; ok { - return true - } - return false + return portName == expectedPhysPortName }) - if err != nil { - return "", fmt.Errorf("sf representor for pfID:%s, sfIndex:%s not found", pfID, sfIndex) + if err == nil { + return netdev, nil + } + + // match port name without controller index (legacy) + expectedPhysPortName = fmt.Sprintf("pf%s", pfID) + netdev, err = findNetdevWithPortNameCriteria(func(portName string) bool { + return portName == expectedPhysPortName + }) + + if err == nil { + return netdev, nil } - return netdev, nil + + return "", fmt.Errorf("pf representor for pfID: %s not found", pfID) } // GetRepresentorPortFlavour returns the representor port flavour @@ -351,15 +410,17 @@ func GetRepresentorPortFlavour(netdev string) (PortFlavour, error) 
{ return PORT_FLAVOUR_UNKNOWN, err } - typeToRegex := map[PortFlavour]*regexp.Regexp{ - PORT_FLAVOUR_PHYSICAL: physPortRepRegex, - PORT_FLAVOUR_PCI_PF: pfPortRepRegex, - PORT_FLAVOUR_PCI_VF: vfPortRepRegex, - PORT_FLAVOUR_PCI_SF: sfPortRepRegex, + typeToRegex := map[PortFlavour][]*regexp.Regexp{ + PORT_FLAVOUR_PHYSICAL: {physPortRepRegex}, + PORT_FLAVOUR_PCI_PF: {pfPortRepRegex}, + PORT_FLAVOUR_PCI_VF: {vfPortRepRegex, vfPortRepRegexWithControllerIndex}, + PORT_FLAVOUR_PCI_SF: {sfPortRepRegex, sfPortRepRegexWithControllerIndex}, } - for flavour, regex := range typeToRegex { - if regex.MatchString(portName) { - return flavour, nil + for flavour, regexs := range typeToRegex { + for _, regex := range regexs { + if regex.MatchString(portName) { + return flavour, nil + } } } return PORT_FLAVOUR_UNKNOWN, nil @@ -473,7 +534,7 @@ func SetRepresentorPeerMacAddress(netdev string, mac net.HardwareAddr) error { if err != nil { return fmt.Errorf("failed to get phys_port_name for netdev %s: %v", netdev, err) } - pfID, vfIndex, err := parsePortName(physPortNameStr) + pfID, vfIndex, err := parseVFPortName(physPortNameStr) if err != nil { return fmt.Errorf("failed to get the pf and vf index for netdev %s "+ "with phys_port_name %s: %v", netdev, physPortNameStr, err) diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/utils.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/utils.go index 84772da95b..dd9ec282e0 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/utils.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/sriovnet/utils.go @@ -1,21 +1,18 @@ -/*---------------------------------------------------- - * - * 2022 NVIDIA CORPORATION & AFFILIATES - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - *---------------------------------------------------- - */ +/* +Copyright 2023 NVIDIA CORPORATION & AFFILIATES + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ package sriovnet diff --git a/go-controller/vendor/github.com/spf13/afero/.editorconfig b/go-controller/vendor/github.com/spf13/afero/.editorconfig new file mode 100644 index 0000000000..4492e9f9fe --- /dev/null +++ b/go-controller/vendor/github.com/spf13/afero/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/go-controller/vendor/github.com/spf13/afero/.golangci.yaml b/go-controller/vendor/github.com/spf13/afero/.golangci.yaml new file mode 100644 index 0000000000..806289a250 --- /dev/null +++ b/go-controller/vendor/github.com/spf13/afero/.golangci.yaml @@ -0,0 +1,18 @@ +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/afero) + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - staticcheck + +issues: + exclude-dirs: + - gcsfs/internal/stiface diff --git a/go-controller/vendor/github.com/spf13/afero/README.md b/go-controller/vendor/github.com/spf13/afero/README.md index 3bafbfdfca..86f1545543 100644 --- a/go-controller/vendor/github.com/spf13/afero/README.md +++ b/go-controller/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,11 @@ A FileSystem Abstraction System for Go -[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) +[![Join the chat at 
https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero?style=flat-square)](https://goreportcard.com/report/github.com/spf13/afero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) # Overview @@ -12,7 +16,7 @@ types and methods. Afero has an exceptionally clean interface and simple design without needless constructors or initialization methods. Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power +filesystems that make it easy to work with, while retaining all the power and benefit of the os and ioutil packages. Afero provides significant improvements over using the os package alone, most @@ -427,6 +431,39 @@ See the [Releases Page](https://github.com/spf13/afero/releases). 4. Push to the branch (`git push origin my-new-feature`) 5. Create new Pull Request +## Releasing + +As of version 1.14.0, Afero moved implementations with third-party libraries to +their own submodules. + +Releasing a new version now requires a few steps: + +``` +VERSION=X.Y.Z +git tag -a v$VERSION -m "Release $VERSION" +git push origin v$VERSION + +cd gcsfs +go get github.com/spf13/afero@v$VERSION +go mod tidy +git commit -am "Update afero to v$VERSION" +git tag -a gcsfs/v$VERSION -m "Release gcsfs $VERSION" +git push origin gcsfs/v$VERSION +cd .. + +cd sftpfs +go get github.com/spf13/afero@v$VERSION +go mod tidy +git commit -am "Update afero to v$VERSION" +git tag -a sftpfs/v$VERSION -m "Release sftpfs $VERSION" +git push origin sftpfs/v$VERSION +cd .. 
+ +git push +``` + +TODO: move these instructions to a Makefile or something + ## Contributors Names in no particular order: diff --git a/go-controller/vendor/github.com/spf13/afero/const_bsds.go b/go-controller/vendor/github.com/spf13/afero/const_bsds.go index eed0f225fd..30855de572 100644 --- a/go-controller/vendor/github.com/spf13/afero/const_bsds.go +++ b/go-controller/vendor/github.com/spf13/afero/const_bsds.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly -// +build aix darwin openbsd freebsd netbsd dragonfly +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos +// +build aix darwin openbsd freebsd netbsd dragonfly zos package afero diff --git a/go-controller/vendor/github.com/spf13/afero/const_win_unix.go b/go-controller/vendor/github.com/spf13/afero/const_win_unix.go index 004d57e2ff..12792d21e2 100644 --- a/go-controller/vendor/github.com/spf13/afero/const_win_unix.go +++ b/go-controller/vendor/github.com/spf13/afero/const_win_unix.go @@ -10,8 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix -// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos package afero diff --git a/go-controller/vendor/github.com/spf13/afero/iofs.go b/go-controller/vendor/github.com/spf13/afero/iofs.go index 938b9316e6..b13155ca4a 100644 --- a/go-controller/vendor/github.com/spf13/afero/iofs.go +++ b/go-controller/vendor/github.com/spf13/afero/iofs.go @@ -255,7 +255,6 @@ func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { ret := make([]os.FileInfo, len(entries)) for i := range entries { ret[i], err = entries[i].Info() - if err != nil { return nil, err } diff --git a/go-controller/vendor/github.com/spf13/afero/memmap.go b/go-controller/vendor/github.com/spf13/afero/memmap.go index e6b7d70b94..ed92f5649d 100644 --- a/go-controller/vendor/github.com/spf13/afero/memmap.go +++ b/go-controller/vendor/github.com/spf13/afero/memmap.go @@ -19,6 +19,7 @@ import ( "log" "os" "path/filepath" + "sort" "strings" "sync" "time" @@ -88,6 +89,24 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { return pfile } +func (m *MemMapFs) findDescendants(name string) []*mem.FileData { + fData := m.getData() + descendants := make([]*mem.FileData, 0, len(fData)) + for p, dFile := range fData { + if strings.HasPrefix(p, name+FilePathSeparator) { + descendants = append(descendants, dFile) + } + } + + sort.Slice(descendants, func(i, j int) bool { + cur := len(strings.Split(descendants[i].Name(), FilePathSeparator)) + next := len(strings.Split(descendants[j].Name(), FilePathSeparator)) + return cur < next + }) + + return descendants +} + func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { if f == nil { return @@ -309,29 +328,51 @@ func (m *MemMapFs) Rename(oldname, newname string) error { if _, ok := m.getData()[oldname]; ok { 
m.mu.RUnlock() m.mu.Lock() - m.unRegisterWithParent(oldname) + err := m.unRegisterWithParent(oldname) + if err != nil { + return err + } + fileData := m.getData()[oldname] - delete(m.getData(), oldname) mem.ChangeFileName(fileData, newname) m.getData()[newname] = fileData + + err = m.renameDescendants(oldname, newname) + if err != nil { + return err + } + + delete(m.getData(), oldname) + m.registerWithParent(fileData, 0) m.mu.Unlock() m.mu.RLock() } else { return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} } + return nil +} - for p, fileData := range m.getData() { - if strings.HasPrefix(p, oldname+FilePathSeparator) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - p := strings.Replace(p, oldname, newname, 1) - m.getData()[p] = fileData - m.mu.Unlock() - m.mu.RLock() +func (m *MemMapFs) renameDescendants(oldname, newname string) error { + descendants := m.findDescendants(oldname) + removes := make([]string, 0, len(descendants)) + for _, desc := range descendants { + descNewName := strings.Replace(desc.Name(), oldname, newname, 1) + err := m.unRegisterWithParent(desc.Name()) + if err != nil { + return err } + + removes = append(removes, desc.Name()) + mem.ChangeFileName(desc, descNewName) + m.getData()[descNewName] = desc + + m.registerWithParent(desc, 0) + } + for _, r := range removes { + delete(m.getData(), r) } + return nil } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/addr_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/addr_linux.go index 01c2306cb2..9e312043bf 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "net" - "strings" "syscall" "github.com/vishvananda/netlink/nl" @@ -81,9 +80,6 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error msg.Index = uint32(addr.LinkIndex) } else { base := link.Attrs() - if addr.Label != "" 
&& !strings.HasPrefix(addr.Label, base.Name) { - return fmt.Errorf("label must begin with interface name") - } h.ensureIndex(base) msg.Index = uint32(base.Index) } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/bridge_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/bridge_linux.go index fa5766b801..ec941e5c78 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -3,11 +3,102 @@ package netlink import ( "errors" "fmt" + "syscall" "github.com/vishvananda/netlink/nl" "golang.org/x/sys/unix" ) +// BridgeVlanTunnelShow gets vlanid-tunnelid mapping. +// Equivalent to: `bridge vlan tunnelshow` +// +// If the returned error is [ErrDumpInterrupted], results may be inconsistent +// or incomplete. +func BridgeVlanTunnelShow() ([]nl.TunnelInfo, error) { + return pkgHandle.BridgeVlanTunnelShow() +} + +func (h *Handle) BridgeVlanTunnelShow() ([]nl.TunnelInfo, error) { + req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req.AddData(msg) + req.AddData(nl.NewRtAttr(unix.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) + + msgs, executeErr := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + if executeErr != nil && !errors.Is(executeErr, ErrDumpInterrupted) { + return nil, executeErr + } + ret := make([]nl.TunnelInfo, 0) + for _, m := range msgs { + msg := nl.DeserializeIfInfomsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + for _, attr := range attrs { + switch attr.Attr.Type { + case unix.IFLA_AF_SPEC: + nestedAttrs, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, fmt.Errorf("failed to parse nested attr %v", err) + } + for _, nestAttr := range nestedAttrs { + switch nestAttr.Attr.Type { + case nl.IFLA_BRIDGE_VLAN_TUNNEL_INFO: + ret, err = parseTunnelInfo(&nestAttr, ret) + if err != nil { + return nil, 
fmt.Errorf("failed to parse tunnelinfo %v", err) + } + } + } + } + } + } + return ret, executeErr +} + +func parseTunnelInfo(nestAttr *syscall.NetlinkRouteAttr, results []nl.TunnelInfo) ([]nl.TunnelInfo, error) { + tunnelInfos, err := nl.ParseRouteAttr(nestAttr.Value) + if err != nil { + return nil, fmt.Errorf("failed to parse nested attr %v", err) + } + var tunnelId uint32 + var vid uint16 + var flag uint16 + for _, tunnelInfo := range tunnelInfos { + switch tunnelInfo.Attr.Type { + case nl.IFLA_BRIDGE_VLAN_TUNNEL_ID: + tunnelId = native.Uint32(tunnelInfo.Value) + case nl.IFLA_BRIDGE_VLAN_TUNNEL_VID: + vid = native.Uint16(tunnelInfo.Value) + case nl.IFLA_BRIDGE_VLAN_TUNNEL_FLAGS: + flag = native.Uint16(tunnelInfo.Value) + } + } + + if flag == nl.BRIDGE_VLAN_INFO_RANGE_END { + lastTi := results[len(results)-1] + vni := lastTi.TunId + 1 + for i := lastTi.Vid + 1; i < vid; i++ { + t := nl.TunnelInfo{ + TunId: vni, + Vid: i, + } + results = append(results, t) + vni++ + } + } + + t := nl.TunnelInfo{ + TunId: tunnelId, + Vid: vid, + } + + results = append(results, t) + return results, nil +} + // BridgeVlanList gets a map of device id to bridge vlan infos. 
// Equivalent to: `bridge vlan show` // @@ -61,6 +152,38 @@ func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { return ret, executeErr } +// BridgeVlanAddTunnelInfo adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID tunnel_info id TUNID [ self ] [ master ]` +func BridgeVlanAddTunnelInfo(link Link, vid uint16, tunid uint32, self, master bool) error { + return pkgHandle.BridgeVlanAddTunnelInfo(link, vid, 0, tunid, 0, self, master) +} + +// BridgeVlanAddRangeTunnelInfoRange adds a new vlan filter entry +// Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND tunnel_info id VIN-VINEND [ self ] [ master ]` +func BridgeVlanAddRangeTunnelInfoRange(link Link, vid, vidEnd uint16, tunid, tunidEnd uint32, self, master bool) error { + return pkgHandle.BridgeVlanAddTunnelInfo(link, vid, vidEnd, tunid, tunidEnd, self, master) +} + +func (h *Handle) BridgeVlanAddTunnelInfo(link Link, vid, vidEnd uint16, tunid, tunidEnd uint32, self, master bool) error { + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, tunid, tunidEnd, false, false, self, master) +} + +// BridgeVlanDelTunnelInfo adds a new vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID tunnel_info id TUNID [ self ] [ master ]` +func BridgeVlanDelTunnelInfo(link Link, vid uint16, tunid uint32, self, master bool) error { + return pkgHandle.BridgeVlanDelTunnelInfo(link, vid, 0, tunid, 0, self, master) +} + +// BridgeVlanDelRangeTunnelInfoRange adds a new vlan filter entry +// Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND tunnel_info id VIN-VINEND [ self ] [ master ]` +func BridgeVlanDelRangeTunnelInfoRange(link Link, vid, vidEnd uint16, tunid, tunidEnd uint32, self, master bool) error { + return pkgHandle.BridgeVlanDelTunnelInfo(link, vid, vidEnd, tunid, tunidEnd, self, master) +} + +func (h *Handle) BridgeVlanDelTunnelInfo(link Link, vid, vidEnd uint16, tunid, tunidEnd uint32, self, master bool) error { + return 
h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, tunid, tunidEnd, false, false, self, master) +} + // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { @@ -70,7 +193,7 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, 0, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, 0, 0, 0, pvid, untagged, self, master) } // BridgeVlanAddRange adds a new vlan filter entry @@ -82,7 +205,7 @@ func BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, mas // BridgeVlanAddRange adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAddRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, vidEnd, 0, 0, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -94,7 +217,7 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanDel adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, 0, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, 0, 
0, 0, pvid, untagged, self, master) } // BridgeVlanDelRange adds a new vlan filter entry @@ -106,10 +229,10 @@ func BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, mas // BridgeVlanDelRange adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID-VIDEND [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDelRange(link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, pvid, untagged, self, master) + return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, vidEnd, 0, 0, pvid, untagged, self, master) } -func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, pvid, untagged, self, master bool) error { +func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, tunid, tunidEnd uint32, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) @@ -129,25 +252,45 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid, vidEnd uint16, pvid, if flags > 0 { br.AddRtAttr(nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags)) } - vlanInfo := &nl.BridgeVlanInfo{Vid: vid} - if pvid { - vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID - } - if untagged { - vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED - } - if vidEnd != 0 { - vlanEndInfo := &nl.BridgeVlanInfo{Vid: vidEnd} - vlanEndInfo.Flags = vlanInfo.Flags + if tunid != 0 { + if tunidEnd != 0 { + tiStart := br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_INFO, nil) + tiStart.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_ID, nl.Uint32Attr(tunid)) + tiStart.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_VID, nl.Uint16Attr(vid)) + tiStart.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_FLAGS, nl.Uint16Attr(nl.BRIDGE_VLAN_INFO_RANGE_BEGIN)) + + tiEnd := br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_INFO, nil) + tiEnd.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_ID, nl.Uint32Attr(tunidEnd)) + tiEnd.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_VID, 
nl.Uint16Attr(vidEnd)) + tiEnd.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_FLAGS, nl.Uint16Attr(nl.BRIDGE_VLAN_INFO_RANGE_END)) + } else { + ti := br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_INFO, nil) + ti.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_ID, nl.Uint32Attr(tunid)) + ti.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_VID, nl.Uint16Attr(vid)) + ti.AddRtAttr(nl.IFLA_BRIDGE_VLAN_TUNNEL_FLAGS, nl.Uint16Attr(0)) + } + } else { + vlanInfo := &nl.BridgeVlanInfo{Vid: vid} + if pvid { + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID + } + if untagged { + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED + } + + if vidEnd != 0 { + vlanEndInfo := &nl.BridgeVlanInfo{Vid: vidEnd} + vlanEndInfo.Flags = vlanInfo.Flags - vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_BEGIN - br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_BEGIN + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) - vlanEndInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_END - br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanEndInfo.Serialize()) - } else { - br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + vlanEndInfo.Flags |= nl.BRIDGE_VLAN_INFO_RANGE_END + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanEndInfo.Serialize()) + } else { + br.AddRtAttr(nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) + } } req.AddData(br) diff --git a/go-controller/vendor/github.com/vishvananda/netlink/filter.go b/go-controller/vendor/github.com/vishvananda/netlink/filter.go index a722e0a27b..fbb3b6a570 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/filter.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/filter.go @@ -398,6 +398,29 @@ func NewPoliceAction() *PoliceAction { } } +type SampleAction struct { + ActionAttrs + Group uint32 + Rate uint32 + TruncSize uint32 +} + +func (action *SampleAction) Type() string { + return "sample" +} + +func (action *SampleAction) Attrs() *ActionAttrs { + return &action.ActionAttrs +} + +func NewSampleAction() *SampleAction { + return 
&SampleAction{ + ActionAttrs: ActionAttrs{ + Action: TC_ACT_PIPE, + }, + } +} + // MatchAll filters match all packets type MatchAll struct { FilterAttrs diff --git a/go-controller/vendor/github.com/vishvananda/netlink/filter_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/filter_linux.go index 404e50d524..255e591d81 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -54,25 +54,30 @@ func (filter *U32) Type() string { type Flower struct { FilterAttrs - DestIP net.IP - DestIPMask net.IPMask - SrcIP net.IP - SrcIPMask net.IPMask - EthType uint16 - EncDestIP net.IP - EncDestIPMask net.IPMask - EncSrcIP net.IP - EncSrcIPMask net.IPMask - EncDestPort uint16 - EncKeyId uint32 - SrcMac net.HardwareAddr - DestMac net.HardwareAddr - VlanId uint16 - SkipHw bool - SkipSw bool - IPProto *nl.IPProto - DestPort uint16 - SrcPort uint16 + ClassId uint32 + DestIP net.IP + DestIPMask net.IPMask + SrcIP net.IP + SrcIPMask net.IPMask + EthType uint16 + EncDestIP net.IP + EncDestIPMask net.IPMask + EncSrcIP net.IP + EncSrcIPMask net.IPMask + EncDestPort uint16 + EncKeyId uint32 + SrcMac net.HardwareAddr + DestMac net.HardwareAddr + VlanId uint16 + SkipHw bool + SkipSw bool + IPProto *nl.IPProto + DestPort uint16 + SrcPort uint16 + SrcPortRangeMin uint16 + SrcPortRangeMax uint16 + DstPortRangeMin uint16 + DstPortRangeMax uint16 Actions []Action } @@ -171,6 +176,19 @@ func (filter *Flower) encode(parent *nl.RtAttr) error { } } } + if filter.SrcPortRangeMin != 0 && filter.SrcPortRangeMax != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_PORT_SRC_MIN, htons(filter.SrcPortRangeMin)) + parent.AddRtAttr(nl.TCA_FLOWER_KEY_PORT_SRC_MAX, htons(filter.SrcPortRangeMax)) + } + + if filter.DstPortRangeMin != 0 && filter.DstPortRangeMax != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_KEY_PORT_DST_MIN, htons(filter.DstPortRangeMin)) + parent.AddRtAttr(nl.TCA_FLOWER_KEY_PORT_DST_MAX, 
htons(filter.DstPortRangeMax)) + } + + if filter.ClassId != 0 { + parent.AddRtAttr(nl.TCA_FLOWER_CLASSID, nl.Uint32Attr(filter.ClassId)) + } var flags uint32 = 0 if filter.SkipHw { @@ -247,6 +265,16 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { if skipHw != 0 { filter.SkipHw = true } + case nl.TCA_FLOWER_KEY_PORT_SRC_MIN: + filter.SrcPortRangeMin = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_PORT_SRC_MAX: + filter.SrcPortRangeMax = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_PORT_DST_MIN: + filter.DstPortRangeMin = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_PORT_DST_MAX: + filter.DstPortRangeMax = ntohs(datum.Value) + case nl.TCA_FLOWER_CLASSID: + filter.ClassId = native.Uint32(datum.Value) } } return nil @@ -740,6 +768,17 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { aopts.AddRtAttr(nl.TCA_ACT_BPF_PARMS, gen.Serialize()) aopts.AddRtAttr(nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd))) aopts.AddRtAttr(nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name)) + case *SampleAction: + table := attr.AddRtAttr(tabIndex, nil) + tabIndex++ + table.AddRtAttr(nl.TCA_ACT_KIND, nl.ZeroTerminated("sample")) + aopts := table.AddRtAttr(nl.TCA_ACT_OPTIONS, nil) + gen := nl.TcGen{} + toTcGen(action.Attrs(), &gen) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_PARMS, gen.Serialize()) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_RATE, nl.Uint32Attr(action.Rate)) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_PSAMPLE_GROUP, nl.Uint32Attr(action.Group)) + aopts.AddRtAttr(nl.TCA_ACT_SAMPLE_TRUNC_SIZE, nl.Uint32Attr(action.TruncSize)) case *GenericAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -752,6 +791,7 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { table := attr.AddRtAttr(tabIndex, nil) tabIndex++ pedit := nl.TcPedit{} + toTcGen(action.Attrs(), &pedit.Sel.TcGen) if action.SrcMacAddr != nil { pedit.SetEthSrc(action.SrcMacAddr) } @@ -825,6 +865,8 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { action = 
&ConnmarkAction{} case "csum": action = &CsumAction{} + case "sample": + action = &SampleAction{} case "gact": action = &GenericAction{} case "vlan": @@ -949,6 +991,18 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { tcTs := nl.DeserializeTcf(adatum.Value) actionTimestamp = toTimeStamp(tcTs) } + case "sample": + switch adatum.Attr.Type { + case nl.TCA_ACT_SAMPLE_PARMS: + gen := *nl.DeserializeTcGen(adatum.Value) + toAttrs(&gen, action.Attrs()) + case nl.TCA_ACT_SAMPLE_RATE: + action.(*SampleAction).Rate = native.Uint32(adatum.Value[0:4]) + case nl.TCA_ACT_SAMPLE_PSAMPLE_GROUP: + action.(*SampleAction).Group = native.Uint32(adatum.Value[0:4]) + case nl.TCA_ACT_SAMPLE_TRUNC_SIZE: + action.(*SampleAction).TruncSize = native.Uint32(adatum.Value[0:4]) + } case "gact": switch adatum.Attr.Type { case nl.TCA_GACT_PARMS: diff --git a/go-controller/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/go-controller/vendor/github.com/vishvananda/netlink/handle_unspecified.go index 3fe03642e5..185e671519 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/handle_unspecified.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/handle_unspecified.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netlink @@ -183,6 +184,10 @@ func (h *Handle) LinkSetGROIPv4MaxSize(link Link, maxSize int) error { return ErrNotImplemented } +func (h *Handle) LinkSetIP6AddrGenMode(link Link, mode int) error { + return ErrNotImplemented +} + func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { return ErrNotImplemented } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/ioctl_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/ioctl_linux.go index 4d33db5da5..f8da92e214 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/ioctl_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/ioctl_linux.go @@ -86,5 +86,5 @@ func newIocltStringSetReq(linkName string) (*Ifreq, 
*ethtoolSset) { // getSocketUDP returns file descriptor to new UDP socket // It is used for communication with ioctl interface. func getSocketUDP() (int, error) { - return syscall.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0) + return syscall.Socket(unix.AF_INET, unix.SOCK_DGRAM|unix.SOCK_CLOEXEC, 0) } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/ipset_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/ipset_linux.go index f4c05229fa..7730992ee3 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/ipset_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/ipset_linux.go @@ -147,9 +147,11 @@ func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOption req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_TYPENAME, nl.ZeroTerminated(typename))) + cadtFlags := optionsToBitflag(options) + revision := options.Revision if revision == 0 { - revision = getIpsetDefaultWithTypeName(typename) + revision = getIpsetDefaultRevision(typename, cadtFlags) } req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_REVISION, nl.Uint8Attr(revision))) @@ -181,18 +183,6 @@ func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOption data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER, Value: *timeout}) } - var cadtFlags uint32 - - if options.Comments { - cadtFlags |= nl.IPSET_FLAG_WITH_COMMENT - } - if options.Counters { - cadtFlags |= nl.IPSET_FLAG_WITH_COUNTERS - } - if options.Skbinfo { - cadtFlags |= nl.IPSET_FLAG_WITH_SKBINFO - } - if cadtFlags != 0 { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_CADT_FLAGS | nl.NLA_F_NET_BYTEORDER, Value: cadtFlags}) } @@ -395,14 +385,89 @@ func (h *Handle) newIpsetRequest(cmd int) *nl.NetlinkRequest { return req } -func getIpsetDefaultWithTypeName(typename string) uint8 { +// NOTE: This can't just take typename into account, it also has to take desired +// feature 
support into account, on a per-set-type basis, to return the correct revision, see e.g. +// https://github.com/Olipro/ipset/blob/9f145b49100104d6570fe5c31a5236816ebb4f8f/kernel/net/netfilter/ipset/ip_set_hash_ipport.c#L30 +// +// This means that whenever a new "type" of ipset is added, returning the "correct" default revision +// requires adding a new case here for that type, and consulting the ipset C code to figure out the correct +// combination of type name, feature bit flags, and revision ranges. +// +// Care should be taken as some types share the same revision ranges for the same features, and others do not. +// When in doubt, mimic the C code. +func getIpsetDefaultRevision(typename string, featureFlags uint32) uint8 { switch typename { case "hash:ip,port", - "hash:ip,port,ip", - "hash:ip,port,net", + "hash:ip,port,ip": + // Taken from + // - ipset/kernel/net/netfilter/ipset/ip_set_hash_ipport.c + // - ipset/kernel/net/netfilter/ipset/ip_set_hash_ipportip.c + if (featureFlags & nl.IPSET_FLAG_WITH_SKBINFO) != 0 { + return 5 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_FORCEADD) != 0 { + return 4 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_COMMENT) != 0 { + return 3 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_COUNTERS) != 0 { + return 2 + } + + // the min revision this library supports for this type + return 1 + + case "hash:ip,port,net", "hash:net,port": + // Taken from + // - ipset/kernel/net/netfilter/ipset/ip_set_hash_ipportnet.c + // - ipset/kernel/net/netfilter/ipset/ip_set_hash_netport.c + if (featureFlags & nl.IPSET_FLAG_WITH_SKBINFO) != 0 { + return 7 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_FORCEADD) != 0 { + return 6 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_COMMENT) != 0 { + return 5 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_COUNTERS) != 0 { + return 4 + } + + if (featureFlags & nl.IPSET_FLAG_NOMATCH) != 0 { + return 3 + } + // the min revision this library supports for this type + return 2 + + case "hash:ip": + // Taken from + // 
- ipset/kernel/net/netfilter/ipset/ip_set_hash_ip.c + if (featureFlags & nl.IPSET_FLAG_WITH_SKBINFO) != 0 { + return 4 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_FORCEADD) != 0 { + return 3 + } + + if (featureFlags & nl.IPSET_FLAG_WITH_COMMENT) != 0 { + return 2 + } + + // the min revision this library supports for this type return 1 } + + // can't map the correct revision for this type. return 0 } @@ -579,3 +644,19 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { } return } + +func optionsToBitflag(options IpsetCreateOptions) uint32 { + var cadtFlags uint32 + + if options.Comments { + cadtFlags |= nl.IPSET_FLAG_WITH_COMMENT + } + if options.Counters { + cadtFlags |= nl.IPSET_FLAG_WITH_COUNTERS + } + if options.Skbinfo { + cadtFlags |= nl.IPSET_FLAG_WITH_SKBINFO + } + + return cadtFlags +} diff --git a/go-controller/vendor/github.com/vishvananda/netlink/link.go b/go-controller/vendor/github.com/vishvananda/netlink/link.go index cccf5d792a..42cb38bddb 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/link.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/link.go @@ -290,8 +290,15 @@ func (bridge *Bridge) Type() string { // Vlan links have ParentIndex set in their Attrs() type Vlan struct { LinkAttrs - VlanId int - VlanProtocol VlanProtocol + VlanId int + VlanProtocol VlanProtocol + IngressQosMap map[uint32]uint32 + EgressQosMap map[uint32]uint32 + ReorderHdr *bool + Gvrp *bool + LooseBinding *bool + Mvrp *bool + BridgeBinding *bool } func (vlan *Vlan) Attrs() *LinkAttrs { @@ -348,13 +355,14 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag - NonPersist bool - Queues int - Fds []*os.File - Owner uint32 - Group uint32 + Mode TuntapMode + Flags TuntapFlag + NonPersist bool + Queues int + DisabledQueues int + Fds []*os.File + Owner uint32 + Group uint32 } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -425,6 +433,17 
@@ type Veth struct { PeerName string // veth on create only PeerHardwareAddr net.HardwareAddr PeerNamespace interface{} + PeerTxQLen int + PeerNumTxQueues uint32 + PeerNumRxQueues uint32 + PeerMTU uint32 +} + +func NewVeth(attr LinkAttrs) *Veth { + return &Veth{ + LinkAttrs: attr, + PeerTxQLen: -1, + } } func (veth *Veth) Attrs() *LinkAttrs { @@ -1058,6 +1077,8 @@ type Geneve struct { FlowBased bool InnerProtoInherit bool Df GeneveDf + PortLow int + PortHigh int } func (geneve *Geneve) Attrs() *LinkAttrs { diff --git a/go-controller/vendor/github.com/vishvananda/netlink/link_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/link_linux.go index d6bffded31..e26efb449a 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/link_linux.go @@ -1683,6 +1683,73 @@ func (h *Handle) linkModify(link Link, flags int) error { native.PutUint16(b, uint16(link.VlanId)) data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) data.AddRtAttr(nl.IFLA_VLAN_ID, b) + var vlanFlags uint32 + var vlanFlagsMask uint32 + if link.ReorderHdr != nil { + vlanFlagsMask |= nl.VLAN_FLAG_REORDER_HDR + if *link.ReorderHdr { + vlanFlags |= nl.VLAN_FLAG_REORDER_HDR + } else { + vlanFlags &= ^uint32(nl.VLAN_FLAG_REORDER_HDR) + } + } + if link.Gvrp != nil { + vlanFlagsMask |= nl.VLAN_FLAG_GVRP + if *link.Gvrp { + vlanFlags |= nl.VLAN_FLAG_GVRP + } else { + vlanFlags &= ^uint32(nl.VLAN_FLAG_GVRP) + } + } + if link.Mvrp != nil { + vlanFlagsMask |= nl.VLAN_FLAG_MVRP + if *link.Mvrp { + vlanFlags |= nl.VLAN_FLAG_MVRP + } else { + vlanFlags &= ^uint32(nl.VLAN_FLAG_MVRP) + } + } + if link.LooseBinding != nil { + vlanFlagsMask |= nl.VLAN_FLAG_LOOSE_BINDING + if *link.LooseBinding { + vlanFlags |= nl.VLAN_FLAG_LOOSE_BINDING + } else { + vlanFlags &= ^uint32(nl.VLAN_FLAG_LOOSE_BINDING) + } + } + if link.BridgeBinding != nil { + vlanFlagsMask |= nl.VLAN_FLAG_BRIDGE_BINDING + if *link.BridgeBinding { + vlanFlags |= 
nl.VLAN_FLAG_BRIDGE_BINDING + } else { + vlanFlags &= ^uint32(nl.VLAN_FLAG_BRIDGE_BINDING) + } + } + + buf := &bytes.Buffer{} + buf.Write(nl.Uint32Attr(vlanFlags)) + buf.Write(nl.Uint32Attr(vlanFlagsMask)) + data.AddRtAttr(nl.IFLA_VLAN_FLAGS, buf.Bytes()) + + if link.IngressQosMap != nil { + ingressMap := data.AddRtAttr(nl.IFLA_VLAN_INGRESS_QOS, nil) + for from, to := range link.IngressQosMap { + buf := &bytes.Buffer{} + buf.Write(nl.Uint32Attr(from)) + buf.Write(nl.Uint32Attr(to)) + ingressMap.AddRtAttr(nl.IFLA_VLAN_QOS_MAPPING, buf.Bytes()) + } + } + + if link.EgressQosMap != nil { + egressMap := data.AddRtAttr(nl.IFLA_VLAN_EGRESS_QOS, nil) + for from, to := range link.EgressQosMap { + buf := &bytes.Buffer{} + buf.Write(nl.Uint32Attr(from)) + buf.Write(nl.Uint32Attr(to)) + egressMap.AddRtAttr(nl.IFLA_VLAN_QOS_MAPPING, buf.Bytes()) + } + } if link.VlanProtocol != VLAN_PROTOCOL_UNKNOWN { data.AddRtAttr(nl.IFLA_VLAN_PROTOCOL, htons(uint16(link.VlanProtocol))) @@ -1696,16 +1763,25 @@ func (h *Handle) linkModify(link Link, flags int) error { peer := data.AddRtAttr(nl.VETH_INFO_PEER, nil) nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) peer.AddRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) - if base.TxQLen >= 0 { + + if link.PeerTxQLen >= 0 { + peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(link.PeerTxQLen))) + } else if base.TxQLen >= 0 { peer.AddRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } - if base.NumTxQueues > 0 { + if link.PeerNumTxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(link.PeerNumTxQueues)) + } else if base.NumTxQueues > 0 { peer.AddRtAttr(unix.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) } - if base.NumRxQueues > 0 { + if link.PeerNumRxQueues > 0 { + peer.AddRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(link.PeerNumRxQueues)) + } else if base.NumRxQueues > 0 { peer.AddRtAttr(unix.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) } - if base.MTU > 0 { + if link.PeerMTU > 0 
{ + peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(link.PeerMTU)) + } else if base.MTU > 0 { peer.AddRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } if link.PeerHardwareAddr != nil { @@ -2544,6 +2620,14 @@ func (h *Handle) LinkSetLearning(link Link, mode bool) error { return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING) } +func LinkSetVlanTunnel(link Link, mode bool) error { + return pkgHandle.LinkSetVlanTunnel(link, mode) +} + +func (h *Handle) LinkSetVlanTunnel(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_VLAN_TUNNEL) +} + func LinkSetRootBlock(link Link, mode bool) error { return pkgHandle.LinkSetRootBlock(link, mode) } @@ -2670,9 +2754,38 @@ func (h *Handle) LinkSetGroup(link Link, group int) error { return err } +// LinkSetIP6AddrGenMode sets the IPv6 address generation mode of the link device. +// Equivalent to: `ip link set $link addrgenmode $mode` +func LinkSetIP6AddrGenMode(link Link, mode int) error { + return pkgHandle.LinkSetIP6AddrGenMode(link, mode) +} + +// LinkSetIP6AddrGenMode sets the IPv6 address generation mode of the link device. 
+// Equivalent to: `ip link set $link addrgenmode $mode` +func (h *Handle) LinkSetIP6AddrGenMode(link Link, mode int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + + msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + b := make([]byte, 1) + b[0] = uint8(mode) + + data := nl.NewRtAttr(unix.IFLA_INET6_ADDR_GEN_MODE, b) + af := nl.NewRtAttr(unix.AF_INET6, data.Serialize()) + spec := nl.NewRtAttr(unix.IFLA_AF_SPEC, af.Serialize()) + req.AddData(spec) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { - if nk.peerLinkAttrs.HardwareAddr != nil || nk.HardwareAddr != nil { - return fmt.Errorf("netkit doesn't support setting Ethernet") + if nk.Mode != NETKIT_MODE_L2 && (nk.LinkAttrs.HardwareAddr != nil || nk.peerLinkAttrs.HardwareAddr != nil) { + return fmt.Errorf("netkit only allows setting Ethernet in L2 mode") } data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -2724,6 +2837,9 @@ func addNetkitAttrs(nk *Netkit, linkInfo *nl.RtAttr, flag int) error { peer.AddRtAttr(unix.IFLA_NET_NS_FD, nl.Uint32Attr(uint32(ns))) } } + if nk.peerLinkAttrs.HardwareAddr != nil { + peer.AddRtAttr(unix.IFLA_ADDRESS, []byte(nk.peerLinkAttrs.HardwareAddr)) + } return nil } @@ -2752,12 +2868,65 @@ func parseNetkitData(link Link, data []syscall.NetlinkRouteAttr) { } } +func parseVlanQosMap(data []byte) map[uint32]uint32 { + values, err := nl.ParseRouteAttr(data) + if err != nil { + return nil + } + + qosMap := make(map[uint32]uint32) + + for _, value := range values { + switch value.Attr.Type { + case nl.IFLA_VLAN_QOS_MAPPING: + from := native.Uint32(value.Value[:4]) + to := native.Uint32(value.Value[4:]) + qosMap[from] = to + } + } + + return qosMap +} + func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) { vlan := link.(*Vlan) for _, datum := range data { switch datum.Attr.Type { 
case nl.IFLA_VLAN_ID: vlan.VlanId = int(native.Uint16(datum.Value[0:2])) + case nl.IFLA_VLAN_FLAGS: + flags := native.Uint32(datum.Value[0:4]) + trueVal := true + falseVal := false + if flags&nl.VLAN_FLAG_REORDER_HDR != 0 { + vlan.ReorderHdr = &trueVal + } else { + vlan.ReorderHdr = &falseVal + } + if flags&nl.VLAN_FLAG_GVRP != 0 { + vlan.Gvrp = &trueVal + } else { + vlan.Gvrp = &falseVal + } + if flags&nl.VLAN_FLAG_LOOSE_BINDING != 0 { + vlan.LooseBinding = &trueVal + } else { + vlan.LooseBinding = &falseVal + } + if flags&nl.VLAN_FLAG_MVRP != 0 { + vlan.Mvrp = &trueVal + } else { + vlan.Mvrp = &falseVal + } + if flags&nl.VLAN_FLAG_BRIDGE_BINDING != 0 { + vlan.BridgeBinding = &trueVal + } else { + vlan.BridgeBinding = &falseVal + } + case nl.IFLA_VLAN_EGRESS_QOS: + vlan.EgressQosMap = parseVlanQosMap(datum.Value) + case nl.IFLA_VLAN_INGRESS_QOS: + vlan.IngressQosMap = parseVlanQosMap(datum.Value) case nl.IFLA_VLAN_PROTOCOL: vlan.VlanProtocol = VlanProtocol(int(ntohs(datum.Value[0:2]))) } @@ -2821,7 +2990,7 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_VXLAN_PORT_RANGE: buf := bytes.NewBuffer(datum.Value[0:4]) var pr vxlanPortRange - if binary.Read(buf, binary.BigEndian, &pr) != nil { + if binary.Read(buf, binary.BigEndian, &pr) == nil { vxlan.PortLow = int(pr.Lo) vxlan.PortHigh = int(pr.Hi) } @@ -3068,6 +3237,10 @@ func linkFlags(rawFlags uint32) net.Flags { return f } +type genevePortRange struct { + Lo, Hi uint16 +} + func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) @@ -3104,6 +3277,15 @@ func addGeneveAttrs(geneve *Geneve, linkInfo *nl.RtAttr) { data.AddRtAttr(nl.IFLA_GENEVE_TOS, nl.Uint8Attr(geneve.Tos)) } + if geneve.PortLow > 0 || geneve.PortHigh > 0 { + pr := genevePortRange{uint16(geneve.PortLow), uint16(geneve.PortHigh)} + + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, &pr) + + data.AddRtAttr(nl.IFLA_GENEVE_PORT_RANGE, buf.Bytes()) + } + 
data.AddRtAttr(nl.IFLA_GENEVE_DF, nl.Uint8Attr(uint8(geneve.Df))) } @@ -3125,6 +3307,13 @@ func parseGeneveData(link Link, data []syscall.NetlinkRouteAttr) { geneve.FlowBased = true case nl.IFLA_GENEVE_INNER_PROTO_INHERIT: geneve.InnerProtoInherit = true + case nl.IFLA_GENEVE_PORT_RANGE: + buf := bytes.NewBuffer(datum.Value[0:4]) + var pr genevePortRange + if binary.Read(buf, binary.BigEndian, &pr) == nil { + geneve.PortLow = int(pr.Lo) + geneve.PortHigh = int(pr.Hi) + } } } } @@ -3900,11 +4089,27 @@ func parseTuntapData(link Link, data []syscall.NetlinkRouteAttr) { tuntap.Group = native.Uint32(datum.Value) case nl.IFLA_TUN_TYPE: tuntap.Mode = TuntapMode(uint8(datum.Value[0])) + case nl.IFLA_TUN_PI: + if datum.Value[0] == 0 { + tuntap.Flags |= TUNTAP_NO_PI + } + case nl.IFLA_TUN_VNET_HDR: + if datum.Value[0] == 1 { + tuntap.Flags |= TUNTAP_VNET_HDR + } case nl.IFLA_TUN_PERSIST: tuntap.NonPersist = false if uint8(datum.Value[0]) == 0 { tuntap.NonPersist = true } + case nl.IFLA_TUN_MULTI_QUEUE: + if datum.Value[0] == 1 { + tuntap.Flags |= TUNTAP_MULTI_QUEUE + } + case nl.IFLA_TUN_NUM_QUEUES: + tuntap.Queues = int(native.Uint32(datum.Value)) + case nl.IFLA_TUN_NUM_DISABLED_QUEUES: + tuntap.DisabledQueues = int(native.Uint32(datum.Value)) } } } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go index 310bd33d8d..1a5da82c2f 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go @@ -1,5 +1,14 @@ package netlink +import ( + "fmt" + "os" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + // ideally golang.org/x/sys/unix would define IfReq but it only has // IFNAMSIZ, hence this minimalistic implementation const ( @@ -7,8 +16,136 @@ const ( IFNAMSIZ = 16 ) +const TUN = "/dev/net/tun" + type ifReq struct { Name [IFNAMSIZ]byte Flags uint16 pad 
[SizeOfIfReq - IFNAMSIZ - 2]byte } + +// AddQueues opens and attaches multiple queue file descriptors to an existing +// TUN/TAP interface in multi-queue mode. +// +// It performs TUNSETIFF ioctl on each opened file descriptor with the current +// tuntap configuration. Each resulting fd is set to non-blocking mode and +// returned as *os.File. +// +// If the interface was created with a name pattern (e.g. "tap%d"), +// the first successful TUNSETIFF call will return the resolved name, +// which is saved back into tuntap.Name. +// +// This method assumes that the interface already exists and is in multi-queue mode. +// The returned FDs are also appended to tuntap.Fds and tuntap.Queues is updated. +// +// It is the caller's responsibility to close the FDs when they are no longer needed. +func (tuntap *Tuntap) AddQueues(count int) ([]*os.File, error) { + if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { + return nil, fmt.Errorf("Tuntap.Mode %v unknown", tuntap.Mode) + } + if tuntap.Flags&TUNTAP_MULTI_QUEUE == 0 { + return nil, fmt.Errorf("TUNTAP_MULTI_QUEUE not set") + } + if count < 1 { + return nil, fmt.Errorf("count must be >= 1") + } + + req, err := unix.NewIfreq(tuntap.Name) + if err != nil { + return nil, err + } + req.SetUint16(uint16(tuntap.Mode) | uint16(tuntap.Flags)) + + var fds []*os.File + for i := 0; i < count; i++ { + localReq := req + fd, err := unix.Open(TUN, os.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + cleanupFds(fds) + return nil, err + } + + err = unix.IoctlIfreq(fd, unix.TUNSETIFF, req) + if err != nil { + // close the new fd + unix.Close(fd) + // and the already opened ones + cleanupFds(fds) + return nil, fmt.Errorf("tuntap IOCTL TUNSETIFF failed [%d]: %w", i, err) + } + + // Set the tun device to non-blocking before use. 
The below comment + // taken from: + // + // https://github.com/mistsys/tuntap/commit/161418c25003bbee77d085a34af64d189df62bea + // + // Note there is a complication because in go, if a device node is + // opened, go sets it to use nonblocking I/O. However a /dev/net/tun + // doesn't work with epoll until after the TUNSETIFF ioctl has been + // done. So we open the unix fd directly, do the ioctl, then put the + // fd in nonblocking mode, an then finally wrap it in a os.File, + // which will see the nonblocking mode and add the fd to the + // pollable set, so later on when we Read() from it blocked the + // calling thread in the kernel. + // + // See + // https://github.com/golang/go/issues/30426 + // which got exposed in go 1.13 by the fix to + // https://github.com/golang/go/issues/30624 + err = unix.SetNonblock(fd, true) + if err != nil { + cleanupFds(fds) + return nil, fmt.Errorf("tuntap set to non-blocking failed [%d]: %w", i, err) + } + + // create the file from the file descriptor and store it + file := os.NewFile(uintptr(fd), TUN) + fds = append(fds, file) + + // 1) we only care for the name of the first tap in the multi queue set + // 2) if the original name was empty, the localReq has now the actual name + // + // In addition: + // This ensures that the link name is always identical to what the kernel returns. + // Not only in case of an empty name, but also when using name templates. + // e.g. when the provided name is "tap%d", the kernel replaces %d with the next available number. + if i == 0 { + tuntap.Name = strings.Trim(localReq.Name(), "\x00") + } + } + + tuntap.Fds = append(tuntap.Fds, fds...) + tuntap.Queues = len(tuntap.Fds) + return fds, nil +} + +// RemoveQueues closes the given TAP queue file descriptors and removes them +// from the tuntap.Fds list. +// +// This is a logical counterpart to AddQueues and allows releasing specific queues +// (e.g., to simulate queue failure or perform partial detach). 
+// +// The method updates tuntap.Queues to reflect the number of remaining active queues. +// +// It is safe to call with a subset of tuntap.Fds, but the caller must ensure +// that the passed *os.File descriptors belong to this interface. +func (tuntap *Tuntap) RemoveQueues(fds ...*os.File) error { + toClose := make(map[uintptr]struct{}, len(fds)) + for _, fd := range fds { + toClose[fd.Fd()] = struct{}{} + } + + var newFds []*os.File + for _, fd := range tuntap.Fds { + if _, shouldClose := toClose[fd.Fd()]; shouldClose { + if err := fd.Close(); err != nil { + return fmt.Errorf("failed to close queue fd %d: %w", fd.Fd(), err) + } + tuntap.Queues-- + } else { + newFds = append(newFds, fd) + } + } + tuntap.Fds = newFds + return nil +} diff --git a/go-controller/vendor/github.com/vishvananda/netlink/neigh.go b/go-controller/vendor/github.com/vishvananda/netlink/neigh.go index 32d722e885..a96e5846e6 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/neigh.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/neigh.go @@ -19,6 +19,14 @@ type Neigh struct { Vlan int VNI int MasterIndex int + + // These values are expressed as "clock ticks ago". To + // convert these clock ticks to seconds divide by sysconf(_SC_CLK_TCK). + // When _SC_CLK_TCK is 100, for example, the ndm_* times are expressed + // in centiseconds. 
+ Confirmed uint32 // The last time ARP/ND succeeded OR higher layer confirmation was received + Used uint32 // The last time ARP/ND took place for this neighbor + Updated uint32 // The time when the current NUD state was entered } // String returns $ip/$hwaddr $label diff --git a/go-controller/vendor/github.com/vishvananda/netlink/neigh_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/neigh_linux.go index 1c6f2958ae..f4dd83532e 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -349,6 +349,10 @@ func NeighDeserialize(m []byte) (*Neigh, error) { neigh.VNI = int(native.Uint32(attr.Value[0:4])) case NDA_MASTER: neigh.MasterIndex = int(native.Uint32(attr.Value[0:4])) + case NDA_CACHEINFO: + neigh.Confirmed = native.Uint32(attr.Value[0:4]) + neigh.Used = native.Uint32(attr.Value[4:8]) + neigh.Updated = native.Uint32(attr.Value[8:12]) } } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/go-controller/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index da12c42a56..9961e158ab 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package netlink @@ -144,6 +145,10 @@ func LinkSetGROIPv4MaxSize(link Link, maxSize int) error { return ErrNotImplemented } +func LinkSetIP6AddrGenMode(link Link, mode int) error { + return ErrNotImplemented +} + func LinkAdd(link Link) error { return ErrNotImplemented } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go index 34e78ba8da..2441d1ca96 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go 
@@ -26,6 +26,14 @@ const ( IFLA_BRIDGE_FLAGS = iota IFLA_BRIDGE_MODE IFLA_BRIDGE_VLAN_INFO + IFLA_BRIDGE_VLAN_TUNNEL_INFO +) + +const ( + IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC = iota + IFLA_BRIDGE_VLAN_TUNNEL_ID + IFLA_BRIDGE_VLAN_TUNNEL_VID + IFLA_BRIDGE_VLAN_TUNNEL_FLAGS ) const ( @@ -41,6 +49,11 @@ const ( // __u16 vid; // }; +type TunnelInfo struct { + TunId uint32 + Vid uint16 +} + type BridgeVlanInfo struct { Flags uint16 Vid uint16 diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/link_linux.go index 6dfa16cc28..716c2a9a13 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -31,6 +31,20 @@ const ( IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL ) +const ( + IFLA_VLAN_QOS_UNSPEC = iota + IFLA_VLAN_QOS_MAPPING + IFLA_VLAN_QOS_MAX = IFLA_VLAN_QOS_MAPPING +) + +const ( + VLAN_FLAG_REORDER_HDR = 1 << iota + VLAN_FLAG_GVRP + VLAN_FLAG_LOOSE_BINDING + VLAN_FLAG_MVRP + VLAN_FLAG_BRIDGE_BINDING +) + const ( IFLA_NETKIT_UNSPEC = iota IFLA_NETKIT_PEER_INFO @@ -234,6 +248,7 @@ const ( IFLA_GENEVE_TTL_INHERIT IFLA_GENEVE_DF IFLA_GENEVE_INNER_PROTO_INHERIT + IFLA_GENEVE_PORT_RANGE IFLA_GENEVE_MAX = IFLA_GENEVE_INNER_PROTO_INHERIT ) @@ -818,3 +833,10 @@ const ( IFLA_BAREUDP_MULTIPROTO_MODE IFLA_BAREUDP_MAX = IFLA_BAREUDP_MULTIPROTO_MODE ) + +const ( + IN6_ADDR_GEN_MODE_EUI64 = iota + IN6_ADDR_GEN_MODE_NONE + IN6_ADDR_GEN_MODE_STABLE_PRIVACY + IN6_ADDR_GEN_MODE_RANDOM +) diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 4d2732a9e8..f2dc7abb87 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -789,7 +789,7 @@ func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { // Returns the 
netlink socket on which Receive() method can be called // to retrieve the messages from the kernel. func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { - fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) + fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) if err != nil { return nil, err } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go index 7f49125cff..8ee0428db8 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/parse_attr_linux.go @@ -17,7 +17,7 @@ func ParseAttributes(data []byte) <-chan Attribute { go func() { i := 0 - for i+4 < len(data) { + for i+4 <= len(data) { length := int(native.Uint16(data[i : i+2])) attrType := native.Uint16(data[i+2 : i+4]) diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go index ce43ee1550..0244836288 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go @@ -9,31 +9,41 @@ const ( ) const ( - RDMA_NLDEV_CMD_GET = 1 - RDMA_NLDEV_CMD_SET = 2 - RDMA_NLDEV_CMD_NEWLINK = 3 - RDMA_NLDEV_CMD_DELLINK = 4 - RDMA_NLDEV_CMD_SYS_GET = 6 - RDMA_NLDEV_CMD_SYS_SET = 7 + RDMA_NLDEV_CMD_GET = 1 + RDMA_NLDEV_CMD_SET = 2 + RDMA_NLDEV_CMD_NEWLINK = 3 + RDMA_NLDEV_CMD_DELLINK = 4 + RDMA_NLDEV_CMD_SYS_GET = 6 + RDMA_NLDEV_CMD_SYS_SET = 7 + RDMA_NLDEV_CMD_RES_GET = 9 + RDMA_NLDEV_CMD_STAT_GET = 17 ) const ( - RDMA_NLDEV_ATTR_DEV_INDEX = 1 - RDMA_NLDEV_ATTR_DEV_NAME = 2 - RDMA_NLDEV_ATTR_PORT_INDEX = 3 - RDMA_NLDEV_ATTR_CAP_FLAGS = 4 - RDMA_NLDEV_ATTR_FW_VERSION = 5 - RDMA_NLDEV_ATTR_NODE_GUID = 6 - RDMA_NLDEV_ATTR_SYS_IMAGE_GUID = 7 - 
RDMA_NLDEV_ATTR_SUBNET_PREFIX = 8 - RDMA_NLDEV_ATTR_LID = 9 - RDMA_NLDEV_ATTR_SM_LID = 10 - RDMA_NLDEV_ATTR_LMC = 11 - RDMA_NLDEV_ATTR_PORT_STATE = 12 - RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 - RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 - RDMA_NLDEV_ATTR_NDEV_NAME = 51 - RDMA_NLDEV_ATTR_LINK_TYPE = 65 - RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 - RDMA_NLDEV_NET_NS_FD = 68 + RDMA_NLDEV_ATTR_DEV_INDEX = 1 + RDMA_NLDEV_ATTR_DEV_NAME = 2 + RDMA_NLDEV_ATTR_PORT_INDEX = 3 + RDMA_NLDEV_ATTR_CAP_FLAGS = 4 + RDMA_NLDEV_ATTR_FW_VERSION = 5 + RDMA_NLDEV_ATTR_NODE_GUID = 6 + RDMA_NLDEV_ATTR_SYS_IMAGE_GUID = 7 + RDMA_NLDEV_ATTR_SUBNET_PREFIX = 8 + RDMA_NLDEV_ATTR_LID = 9 + RDMA_NLDEV_ATTR_SM_LID = 10 + RDMA_NLDEV_ATTR_LMC = 11 + RDMA_NLDEV_ATTR_PORT_STATE = 12 + RDMA_NLDEV_ATTR_PORT_PHYS_STATE = 13 + RDMA_NLDEV_ATTR_DEV_NODE_TYPE = 14 + RDMA_NLDEV_ATTR_RES_SUMMARY = 15 + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY = 16 + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME = 17 + RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR = 18 + RDMA_NLDEV_ATTR_NDEV_NAME = 51 + RDMA_NLDEV_ATTR_LINK_TYPE = 65 + RDMA_NLDEV_SYS_ATTR_NETNS_MODE = 66 + RDMA_NLDEV_NET_NS_FD = 68 + RDMA_NLDEV_ATTR_STAT_HWCOUNTERS = 80 + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY = 81 + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME = 82 + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE = 83 ) diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go index 8172b8471f..b92991de71 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go @@ -13,6 +13,7 @@ const ( SEG6_LOCAL_IIF SEG6_LOCAL_OIF SEG6_LOCAL_BPF + SEG6_LOCAL_VRFTABLE __SEG6_LOCAL_MAX ) const ( diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index b8f500792b..67666816e0 100644 --- 
a/go-controller/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -77,6 +77,17 @@ const ( TCA_ACT_MAX ) +const ( + TCA_ACT_SAMPLE_UNSPEC = iota + TCA_ACT_SAMPLE_TM + TCA_ACT_SAMPLE_PARMS + TCA_ACT_SAMPLE_RATE + TCA_ACT_SAMPLE_TRUNC_SIZE + TCA_ACT_SAMPLE_PSAMPLE_GROUP + TCA_ACT_SAMPLE_PAD + TCA_ACT_SAMPLE_MAX +) + const ( TCA_PRIO_UNSPEC = iota TCA_PRIO_MQ @@ -1112,6 +1123,13 @@ const ( TCA_FLOWER_KEY_ENC_OPTS TCA_FLOWER_KEY_ENC_OPTS_MASK + TCA_FLOWER_IN_HW_COUNT + + TCA_FLOWER_KEY_PORT_SRC_MIN /* be16 */ + TCA_FLOWER_KEY_PORT_SRC_MAX /* be16 */ + TCA_FLOWER_KEY_PORT_DST_MIN /* be16 */ + TCA_FLOWER_KEY_PORT_DST_MAX /* be16 */ + __TCA_FLOWER_MAX ) @@ -1127,11 +1145,11 @@ const TCA_CLS_FLAGS_SKIP_SW = 1 << 1 /* don't use filter in SW */ // }; type TcSfqQopt struct { - Quantum uint8 + Quantum uint32 Perturb int32 Limit uint32 - Divisor uint8 - Flows uint8 + Divisor uint32 + Flows uint32 } func (x *TcSfqQopt) Len() int { @@ -1569,7 +1587,7 @@ func (p *TcPedit) SetIPv6Dst(ip6 net.IP) { } func (p *TcPedit) SetIPv4Src(ip net.IP) { - u32 := NativeEndian().Uint32(ip[:4]) + u32 := NativeEndian().Uint32(ip.To4()) tKey := TcPeditKey{} tKeyEx := TcPeditKeyEx{} @@ -1585,7 +1603,7 @@ func (p *TcPedit) SetIPv4Src(ip net.IP) { } func (p *TcPedit) SetIPv4Dst(ip net.IP) { - u32 := NativeEndian().Uint32(ip[:4]) + u32 := NativeEndian().Uint32(ip.To4()) tKey := TcPeditKey{} tKeyEx := TcPeditKeyEx{} diff --git a/go-controller/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index cdb318ba55..6cfd8f9e0c 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -78,10 +78,14 @@ const ( XFRMA_PROTO /* __u8 */ XFRMA_ADDRESS_FILTER /* struct xfrm_address_filter */ XFRMA_PAD - XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ - 
XFRMA_SET_MARK /* __u32 */ - XFRMA_SET_MARK_MASK /* __u32 */ - XFRMA_IF_ID /* __u32 */ + XFRMA_OFFLOAD_DEV /* struct xfrm_state_offload */ + XFRMA_SET_MARK /* __u32 */ + XFRMA_SET_MARK_MASK /* __u32 */ + XFRMA_IF_ID /* __u32 */ + XFRMA_MTIMER_THRESH /* __u32 in seconds for input SA */ + XFRMA_SA_DIR /* __u8 */ + XFRMA_NAT_KEEPALIVE_INTERVAL /* __u32 in seconds for NAT keepalive */ + XFRMA_SA_PCPU /* __u32 */ XFRMA_MAX = iota - 1 ) diff --git a/go-controller/vendor/github.com/vishvananda/netlink/protinfo.go b/go-controller/vendor/github.com/vishvananda/netlink/protinfo.go index 0163cba3a8..02f066e0a0 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/protinfo.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/protinfo.go @@ -16,6 +16,7 @@ type Protinfo struct { ProxyArpWiFi bool Isolated bool NeighSuppress bool + VlanTunnel bool } // String returns a list of enabled flags @@ -55,6 +56,9 @@ func (prot *Protinfo) String() string { if prot.NeighSuppress { boolStrings = append(boolStrings, "NeighSuppress") } + if prot.VlanTunnel { + boolStrings = append(boolStrings, "VlanTunnel") + } return strings.Join(boolStrings, " ") } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/protinfo_linux.go index aa51e3b470..c7d7b566e2 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -77,7 +77,10 @@ func parseProtinfo(infos []syscall.NetlinkRouteAttr) (pi Protinfo) { pi.Isolated = byteToBool(info.Value[0]) case nl.IFLA_BRPORT_NEIGH_SUPPRESS: pi.NeighSuppress = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_VLAN_TUNNEL: + pi.VlanTunnel = byteToBool(info.Value[0]) } + } return } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/qdisc.go b/go-controller/vendor/github.com/vishvananda/netlink/qdisc.go index 067743d390..1cde43c94d 100644 --- 
a/go-controller/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/qdisc.go @@ -374,10 +374,10 @@ func (qdisc *FqCodel) Type() string { type Sfq struct { QdiscAttrs // TODO: Only the simplified options for SFQ are handled here. Support for the extended one can be added later. - Quantum uint8 - Perturb uint8 + Quantum uint32 + Perturb int32 Limit uint32 - Divisor uint8 + Divisor uint32 } func (sfq *Sfq) String() string { diff --git a/go-controller/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 22cf0e5825..0a2a5891cc 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -321,7 +321,7 @@ func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { case *Sfq: opt := nl.TcSfqQoptV1{} opt.TcSfqQopt.Quantum = qdisc.Quantum - opt.TcSfqQopt.Perturb = int32(qdisc.Perturb) + opt.TcSfqQopt.Perturb = qdisc.Perturb opt.TcSfqQopt.Limit = qdisc.Limit opt.TcSfqQopt.Divisor = qdisc.Divisor @@ -683,7 +683,7 @@ func parseSfqData(qdisc Qdisc, value []byte) error { sfq := qdisc.(*Sfq) opt := nl.DeserializeTcSfqQoptV1(value) sfq.Quantum = opt.TcSfqQopt.Quantum - sfq.Perturb = uint8(opt.TcSfqQopt.Perturb) + sfq.Perturb = opt.TcSfqQopt.Perturb sfq.Limit = opt.TcSfqQopt.Limit sfq.Divisor = opt.TcSfqQopt.Divisor diff --git a/go-controller/vendor/github.com/vishvananda/netlink/rdma_link_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/rdma_link_linux.go index 9bb7507321..2e774e5aef 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/rdma_link_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/rdma_link_linux.go @@ -18,6 +18,7 @@ type RdmaLinkAttrs struct { FirmwareVersion string NodeGuid string SysImageGuid string + NumPorts uint32 } // Link represents a rdma device from netlink. 
@@ -69,6 +70,11 @@ func executeOneGetRdmaLink(data []byte) (*RdmaLink, error) { r := bytes.NewReader(value) binary.Read(r, nl.NativeEndian(), &sysGuid) link.Attrs.SysImageGuid = uint64ToGuidString(sysGuid) + case nl.RDMA_NLDEV_ATTR_PORT_INDEX: + var availablePort uint32 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &availablePort) + link.Attrs.NumPorts = availablePort } if (len % 4) != 0 { // Skip pad bytes @@ -345,3 +351,212 @@ func (h *Handle) RdmaLinkAdd(linkName string, linkType string, netdev string) er _, err := req.Execute(unix.NETLINK_RDMA, 0) return err } + +// RdmaResource represents a rdma device resource tracking summaries +type RdmaResource struct { + Index uint32 + Name string + RdmaResourceSummaryEntries map[string]uint64 +} + +// RdmaResourceList list rdma resource tracking information +// Returns all rdma devices resource tracking summary on success or returns error +// otherwise. +// Equivalent to: `rdma resource' +func RdmaResourceList() ([]*RdmaResource, error) { + return pkgHandle.RdmaResourceList() +} + +// RdmaResourceList list rdma resource tracking information +// Returns all rdma devices resource tracking summary on success or returns error +// otherwise. 
+// Equivalent to: `rdma resource' +func (h *Handle) RdmaResourceList() ([]*RdmaResource, error) { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_RES_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_DUMP) + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return nil, err + } + if len(msgs) == 0 { + return nil, fmt.Errorf("No valid response from kernel") + } + var rdmaResources []*RdmaResource + for _, msg := range msgs { + res, err := executeOneGetRdmaResourceList(msg) + if err != nil { + return nil, err + } + rdmaResources = append(rdmaResources, res) + } + return rdmaResources, nil +} + +func parseRdmaCounters(counterType uint16, data []byte) (map[string]uint64, error) { + var counterKeyType, counterValueType uint16 + switch counterType { + case nl.RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY: + counterKeyType = nl.RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME + counterValueType = nl.RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR + case nl.RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY: + counterKeyType = nl.RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME + counterValueType = nl.RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE + default: + return nil, fmt.Errorf("Invalid counter type: %d", counterType) + } + counters := make(map[string]uint64) + reader := bytes.NewReader(data) + + for reader.Len() >= 4 { + _, attrType, _, value := parseNfAttrTLV(reader) + if attrType != counterType { + return nil, fmt.Errorf("Invalid resource summary entry type; %d", attrType) + } + + summaryReader := bytes.NewReader(value) + for summaryReader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(summaryReader) + if attrType != counterKeyType { + return nil, fmt.Errorf("Invalid resource summary entry name type; %d", attrType) + } + name := string(value[0 : len-1]) + // Skip pad bytes + if (len % 4) != 0 { + summaryReader.Seek(int64(4-(len%4)), seekCurrent) + } + _, attrType, len, value = parseNfAttrTLV(summaryReader) + if attrType != counterValueType { + return nil, 
fmt.Errorf("Invalid resource summary entry value type; %d", attrType) + } + counters[name] = native.Uint64(value) + } + } + return counters, nil +} + +func executeOneGetRdmaResourceList(data []byte) (*RdmaResource, error) { + var res RdmaResource + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_ATTR_DEV_INDEX: + var Index uint32 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &Index) + res.Index = Index + case nl.RDMA_NLDEV_ATTR_DEV_NAME: + res.Name = string(value[0 : len-1]) + case nl.RDMA_NLDEV_ATTR_RES_SUMMARY: + var err error + res.RdmaResourceSummaryEntries, err = parseRdmaCounters(nl.RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, value) + if err != nil { + return nil, err + } + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return &res, nil +} + +// RdmaPortStatistic represents a rdma port statistic counter +type RdmaPortStatistic struct { + PortIndex uint32 + Statistics map[string]uint64 +} + +// RdmaDeviceStatistic represents a rdma device statistic counter +type RdmaDeviceStatistic struct { + RdmaPortStatistics []*RdmaPortStatistic +} + +// RdmaStatistic get rdma device statistic counters +// Returns rdma device statistic counters on success or returns error +// otherwise. +// Equivalent to: `rdma statistic show link [DEV]' +func RdmaStatistic(link *RdmaLink) (*RdmaDeviceStatistic, error) { + return pkgHandle.RdmaStatistic(link) +} + +// RdmaStatistic get rdma device statistic counters +// Returns rdma device statistic counters on success or returns error +// otherwise. 
+// Equivalent to: `rdma statistic show link [DEV]' +func (h *Handle) RdmaStatistic(link *RdmaLink) (*RdmaDeviceStatistic, error) { + rdmaLinkStatistic := make([]*RdmaPortStatistic, 0) + for portIndex := uint32(1); portIndex <= link.Attrs.NumPorts; portIndex++ { + portStatistic, err := h.RdmaPortStatisticList(link, portIndex) + if err != nil { + return nil, err + } + rdmaLinkStatistic = append(rdmaLinkStatistic, portStatistic) + } + return &RdmaDeviceStatistic{RdmaPortStatistics: rdmaLinkStatistic}, nil +} + +// RdmaPortStatisticList get rdma device port statistic counters +// Returns rdma device port statistic counters on success or returns error +// otherwise. +// Equivalent to: `rdma statistic show link [DEV/PORT]' +func RdmaPortStatisticList(link *RdmaLink, port uint32) (*RdmaPortStatistic, error) { + return pkgHandle.RdmaPortStatisticList(link, port) +} + +// RdmaPortStatisticList get rdma device port statistic counters +// Returns rdma device port statistic counters on success or returns error +// otherwise. 
+// Equivalent to: `rdma statistic show link [DEV/PORT]' +func (h *Handle) RdmaPortStatisticList(link *RdmaLink, port uint32) (*RdmaPortStatistic, error) { + proto := getProtoField(nl.RDMA_NL_NLDEV, nl.RDMA_NLDEV_CMD_STAT_GET) + req := h.newNetlinkRequest(proto, unix.NLM_F_ACK|unix.NLM_F_REQUEST) + b := make([]byte, 4) + native.PutUint32(b, link.Attrs.Index) + data := nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_DEV_INDEX, b) + req.AddData(data) + + b = make([]byte, 4) + native.PutUint32(b, port) + data = nl.NewRtAttr(nl.RDMA_NLDEV_ATTR_PORT_INDEX, b) + req.AddData(data) + + msgs, err := req.Execute(unix.NETLINK_RDMA, 0) + if err != nil { + return nil, err + } + if len(msgs) != 1 { + return nil, fmt.Errorf("No valid response from kernel") + } + return executeOneGetRdmaPortStatistics(msgs[0]) +} + +func executeOneGetRdmaPortStatistics(data []byte) (*RdmaPortStatistic, error) { + var stat RdmaPortStatistic + reader := bytes.NewReader(data) + for reader.Len() >= 4 { + _, attrType, len, value := parseNfAttrTLV(reader) + + switch attrType { + case nl.RDMA_NLDEV_ATTR_PORT_INDEX: + var Index uint32 + r := bytes.NewReader(value) + binary.Read(r, nl.NativeEndian(), &Index) + stat.PortIndex = Index + case nl.RDMA_NLDEV_ATTR_STAT_HWCOUNTERS: + var err error + stat.Statistics, err = parseRdmaCounters(nl.RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY, value) + if err != nil { + return nil, err + } + } + if (len % 4) != 0 { + // Skip pad bytes + reader.Seek(int64(4-(len%4)), seekCurrent) + } + } + return &stat, nil +} diff --git a/go-controller/vendor/github.com/vishvananda/netlink/route.go b/go-controller/vendor/github.com/vishvananda/netlink/route.go index 1b4555d5c5..47a57c24c8 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/route.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/route.go @@ -45,7 +45,7 @@ type Encap interface { Equal(Encap) bool } -//Protocol describe what was the originator of the route +// Protocol describe what was the originator of the route type 
RouteProtocol int // Route represents a netlink route. @@ -70,6 +70,7 @@ type Route struct { Via Destination Realm int MTU int + MTULock bool Window int Rtt int RttVar int @@ -81,6 +82,7 @@ type Route struct { InitCwnd int Features int RtoMin int + RtoMinLock bool InitRwnd int QuickACK int Congctl string diff --git a/go-controller/vendor/github.com/vishvananda/netlink/route_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/route_linux.go index 28a132a2f0..9f06673a45 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/route_linux.go @@ -270,6 +270,7 @@ type SEG6LocalEncap struct { Action int Segments []net.IP // from SRH in seg6_local_lwt Table int // table id for End.T and End.DT6 + VrfTable int // vrftable id for END.DT4 and END.DT6 InAddr net.IP In6Addr net.IP Iif int @@ -305,6 +306,9 @@ func (e *SEG6LocalEncap) Decode(buf []byte) error { case nl.SEG6_LOCAL_TABLE: e.Table = int(native.Uint32(attr.Value[0:4])) e.Flags[nl.SEG6_LOCAL_TABLE] = true + case nl.SEG6_LOCAL_VRFTABLE: + e.VrfTable = int(native.Uint32(attr.Value[0:4])) + e.Flags[nl.SEG6_LOCAL_VRFTABLE] = true case nl.SEG6_LOCAL_NH4: e.InAddr = net.IP(attr.Value[0:4]) e.Flags[nl.SEG6_LOCAL_NH4] = true @@ -361,6 +365,15 @@ func (e *SEG6LocalEncap) Encode() ([]byte, error) { native.PutUint32(attr[4:], uint32(e.Table)) res = append(res, attr...) } + + if e.Flags[nl.SEG6_LOCAL_VRFTABLE] { + attr := make([]byte, 8) + native.PutUint16(attr, 8) + native.PutUint16(attr[2:], nl.SEG6_LOCAL_VRFTABLE) + native.PutUint32(attr[4:], uint32(e.VrfTable)) + res = append(res, attr...) 
+ } + if e.Flags[nl.SEG6_LOCAL_NH4] { attr := make([]byte, 4) native.PutUint16(attr, 8) @@ -413,6 +426,11 @@ func (e *SEG6LocalEncap) String() string { if e.Flags[nl.SEG6_LOCAL_TABLE] { strs = append(strs, fmt.Sprintf("table %d", e.Table)) } + + if e.Flags[nl.SEG6_LOCAL_VRFTABLE] { + strs = append(strs, fmt.Sprintf("vrftable %d", e.VrfTable)) + } + if e.Flags[nl.SEG6_LOCAL_NH4] { strs = append(strs, fmt.Sprintf("nh4 %s", e.InAddr)) } @@ -477,7 +495,7 @@ func (e *SEG6LocalEncap) Equal(x Encap) bool { if !e.InAddr.Equal(o.InAddr) || !e.In6Addr.Equal(o.In6Addr) { return false } - if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf { + if e.Action != o.Action || e.Table != o.Table || e.Iif != o.Iif || e.Oif != o.Oif || e.bpf != o.bpf || e.VrfTable != o.VrfTable { return false } return true @@ -1072,6 +1090,10 @@ func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.R if route.MTU > 0 { b := nl.Uint32Attr(uint32(route.MTU)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) + if route.MTULock { + b := nl.Uint32Attr(uint32(1 << unix.RTAX_MTU)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_LOCK, b)) + } } if route.Window > 0 { b := nl.Uint32Attr(uint32(route.Window)) @@ -1116,6 +1138,10 @@ func (h *Handle) prepareRouteReq(route *Route, req *nl.NetlinkRequest, msg *nl.R if route.RtoMin > 0 { b := nl.Uint32Attr(uint32(route.RtoMin)) metrics = append(metrics, nl.NewRtAttr(unix.RTAX_RTO_MIN, b)) + if route.RtoMinLock { + b := nl.Uint32Attr(uint32(1 << unix.RTAX_RTO_MIN)) + metrics = append(metrics, nl.NewRtAttr(unix.RTAX_LOCK, b)) + } } if route.InitRwnd > 0 { b := nl.Uint32Attr(uint32(route.InitRwnd)) @@ -1440,6 +1466,9 @@ func deserializeRoute(m []byte) (Route, error) { switch metric.Attr.Type { case unix.RTAX_MTU: route.MTU = int(native.Uint32(metric.Value[0:4])) + case unix.RTAX_LOCK: + route.MTULock = native.Uint32(metric.Value[0:4]) == uint32(1< 0 { link, err := 
h.LinkByName(options.Oif) if err != nil { return nil, err } + oifIndex = uint32(link.Attrs().Index) + } else if options.OifIndex > 0 { + oifIndex = uint32(options.OifIndex) + } + if oifIndex > 0 { b := make([]byte, 4) - native.PutUint32(b, uint32(link.Attrs().Index)) + native.PutUint32(b, oifIndex) req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/socket_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/socket_linux.go index 82891bc2e0..ebda532a88 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -500,7 +500,7 @@ func (h *Handle) UnixSocketDiagInfo() ([]*UnixDiagInfoResp, error) { var attrs []syscall.NetlinkRouteAttr var err error - if attrs, err = nl.ParseRouteAttr(msg[sizeofSocket:]); err != nil { + if attrs, err = nl.ParseRouteAttr(msg[sizeofUnixSocket:]); err != nil { return false } diff --git a/go-controller/vendor/github.com/vishvananda/netlink/xfrm_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/xfrm_linux.go index dd38ed8e08..b603e4c15a 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/xfrm_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/xfrm_linux.go @@ -48,6 +48,14 @@ const ( XFRM_MODE_MAX ) +// SADir is an enum representing an ipsec template direction. 
+type SADir uint8 + +const ( + XFRM_SA_DIR_IN SADir = iota + 1 + XFRM_SA_DIR_OUT +) + func (m Mode) String() string { switch m { case XFRM_MODE_TRANSPORT: diff --git a/go-controller/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/go-controller/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 2f46146514..092ffe97b9 100644 --- a/go-controller/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -113,7 +113,9 @@ type XfrmState struct { Statistics XfrmStateStats Mark *XfrmMark OutputMark *XfrmMark + SADir SADir Ifid int + Pcpunum *uint32 Auth *XfrmStateAlgo Crypt *XfrmStateAlgo Aead *XfrmStateAlgo @@ -126,8 +128,8 @@ type XfrmState struct { } func (sa XfrmState) String() string { - return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, Ifid: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", - sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.Ifid, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, OutputMark: %v, SADir: %d, Ifid: %d, Pcpunum: %d, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t, DontEncapDSCP: %t, OSeqMayWrap: %t, Replay: %v", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.OutputMark, sa.SADir, sa.Ifid, *sa.Pcpunum, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN, sa.DontEncapDSCP, sa.OSeqMayWrap, sa.Replay) } func (sa XfrmState) Print(stats bool) string { if !stats { @@ -333,11 +335,21 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } + if state.SADir != 0 { + saDir := nl.NewRtAttr(nl.XFRMA_SA_DIR, nl.Uint8Attr(uint8(state.SADir))) + 
req.AddData(saDir) + } + if state.Ifid != 0 { ifId := nl.NewRtAttr(nl.XFRMA_IF_ID, nl.Uint32Attr(uint32(state.Ifid))) req.AddData(ifId) } + if state.Pcpunum != nil { + pcpuNum := nl.NewRtAttr(nl.XFRMA_SA_PCPU, nl.Uint32Attr(uint32(*state.Pcpunum))) + req.AddData(pcpuNum) + } + _, err := req.Execute(unix.NETLINK_XFRM, 0) return err } @@ -459,6 +471,11 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState req.AddData(ifId) } + if state.Pcpunum != nil { + pcpuNum := nl.NewRtAttr(nl.XFRMA_SA_PCPU, nl.Uint32Attr(uint32(*state.Pcpunum))) + req.AddData(pcpuNum) + } + resType := nl.XFRM_MSG_NEWSA if nlProto == nl.XFRM_MSG_DELSA { resType = 0 @@ -581,8 +598,13 @@ func parseXfrmState(m []byte, family int) (*XfrmState, error) { if state.OutputMark.Mask == 0xffffffff { state.OutputMark.Mask = 0 } + case nl.XFRMA_SA_DIR: + state.SADir = SADir(attr.Value[0]) case nl.XFRMA_IF_ID: state.Ifid = int(native.Uint32(attr.Value)) + case nl.XFRMA_SA_PCPU: + pcpuNum := native.Uint32(attr.Value) + state.Pcpunum = &pcpuNum case nl.XFRMA_REPLAY_VAL: if state.Replay == nil { state.Replay = new(XfrmReplayState) diff --git a/go-controller/vendor/github.com/vishvananda/netns/.golangci.yml b/go-controller/vendor/github.com/vishvananda/netns/.golangci.yml index 600bef78e2..2b6988f286 100644 --- a/go-controller/vendor/github.com/vishvananda/netns/.golangci.yml +++ b/go-controller/vendor/github.com/vishvananda/netns/.golangci.yml @@ -1,2 +1,26 @@ +linters: + enable: + - errcheck + - errorlint + - gocritic + - gosec + - gosimple + - govet + - gci + - misspell + - nonamedreturns + - staticcheck + - unconvert + - unparam + - unused + - whitespace + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/vishvananda) + run: timeout: 5m diff --git a/go-controller/vendor/github.com/vishvananda/netns/.yamllint.yml b/go-controller/vendor/github.com/vishvananda/netns/.yamllint.yml new file mode 100644 index 0000000000..1b2830cc99 --- /dev/null +++ 
b/go-controller/vendor/github.com/vishvananda/netns/.yamllint.yml @@ -0,0 +1,9 @@ +--- +extends: default + +rules: + document-start: disable + line-length: disable + truthy: + ignore: | + .github/workflows/*.yml diff --git a/go-controller/vendor/github.com/vishvananda/netns/netns_linux.go b/go-controller/vendor/github.com/vishvananda/netns/netns_linux.go index 2ed7c7e2fa..51c8f4b869 100644 --- a/go-controller/vendor/github.com/vishvananda/netns/netns_linux.go +++ b/go-controller/vendor/github.com/vishvananda/netns/netns_linux.go @@ -26,19 +26,19 @@ const bindMountPath = "/run/netns" /* Bind mount path for named netns */ // Setns sets namespace using golang.org/x/sys/unix.Setns. // // Deprecated: Use golang.org/x/sys/unix.Setns instead. -func Setns(ns NsHandle, nstype int) (err error) { +func Setns(ns NsHandle, nstype int) error { return unix.Setns(int(ns), nstype) } // Set sets the current network namespace to the namespace represented // by NsHandle. -func Set(ns NsHandle) (err error) { +func Set(ns NsHandle) error { return unix.Setns(int(ns), unix.CLONE_NEWNET) } // New creates a new network namespace, sets it as current and returns // a handle to it. 
-func New() (ns NsHandle, err error) { +func New() (NsHandle, error) { if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { return -1, err } @@ -49,7 +49,7 @@ func New() (ns NsHandle, err error) { // and returns a handle to it func NewNamed(name string) (NsHandle, error) { if _, err := os.Stat(bindMountPath); os.IsNotExist(err) { - err = os.MkdirAll(bindMountPath, 0755) + err = os.MkdirAll(bindMountPath, 0o755) if err != nil { return None(), err } @@ -62,7 +62,7 @@ func NewNamed(name string) (NsHandle, error) { namedPath := path.Join(bindMountPath, name) - f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0444) + f, err := os.OpenFile(namedPath, os.O_CREATE|os.O_EXCL, 0o444) if err != nil { newNs.Close() return None(), err @@ -217,11 +217,12 @@ func getPidForContainer(id string) (int, error) { id += "*" var pidFile string - if cgroupVer == 1 { + switch cgroupVer { + case 1: pidFile = "tasks" - } else if cgroupVer == 2 { + case 2: pidFile = "cgroup.procs" - } else { + default: return -1, fmt.Errorf("Invalid cgroup version '%d'", cgroupVer) } @@ -247,6 +248,10 @@ func getPidForContainer(id string) (int, error) { filepath.Join(cgroupRoot, "kubepods.slice", "*.slice", "*", "docker-"+id+".scope", pidFile), // Same as above but for Guaranteed QoS filepath.Join(cgroupRoot, "kubepods.slice", "*", "docker-"+id+".scope", pidFile), + // Support for nerdctl + filepath.Join(cgroupRoot, "system.slice", "nerdctl-"+id+".scope", pidFile), + // Support for finch + filepath.Join(cgroupRoot, "..", "systemd", "finch", id, pidFile), } var filename string @@ -276,7 +281,7 @@ func getPidForContainer(id string) (int, error) { pid, err = strconv.Atoi(result[0]) if err != nil { - return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err) + return pid, fmt.Errorf("Invalid pid '%s': %w", result[0], err) } return pid, nil diff --git a/go-controller/vendor/github.com/vishvananda/netns/netns_others.go b/go-controller/vendor/github.com/vishvananda/netns/netns_others.go index 
0489837741..f444f6e77f 100644 --- a/go-controller/vendor/github.com/vishvananda/netns/netns_others.go +++ b/go-controller/vendor/github.com/vishvananda/netns/netns_others.go @@ -3,27 +3,23 @@ package netns -import ( - "errors" -) +import "errors" -var ( - ErrNotImplemented = errors.New("not implemented") -) +var ErrNotImplemented = errors.New("not implemented") // Setns sets namespace using golang.org/x/sys/unix.Setns on Linux. It // is not implemented on other platforms. // // Deprecated: Use golang.org/x/sys/unix.Setns instead. -func Setns(ns NsHandle, nstype int) (err error) { +func Setns(ns NsHandle, nstype int) error { return ErrNotImplemented } -func Set(ns NsHandle) (err error) { +func Set(ns NsHandle) error { return ErrNotImplemented } -func New() (ns NsHandle, err error) { +func New() (NsHandle, error) { return -1, ErrNotImplemented } @@ -51,7 +47,7 @@ func GetFromPid(pid int) (NsHandle, error) { return -1, ErrNotImplemented } -func GetFromThread(pid, tid int) (NsHandle, error) { +func GetFromThread(pid int, tid int) (NsHandle, error) { return -1, ErrNotImplemented } diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 9a8ff2e37f..f893d3ac8c 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -253,8 +253,8 @@ github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/ github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/k8s.cni.cncf.io/v1 github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1 github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/utils -# github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc -## explicit; go 1.18 +# github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3 +## explicit; go 1.23.0 github.com/k8snetworkplumbingwg/sriovnet 
github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/filesystem github.com/k8snetworkplumbingwg/sriovnet/pkg/utils/netlinkops @@ -450,8 +450,8 @@ github.com/safchain/ethtool # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spf13/afero v1.9.5 -## explicit; go 1.16 +# github.com/spf13/afero v1.14.0 +## explicit; go 1.23.0 github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem @@ -470,11 +470,11 @@ github.com/stretchr/testify/require # github.com/urfave/cli/v2 v2.27.2 ## explicit; go 1.18 github.com/urfave/cli/v2 -# github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa +# github.com/vishvananda/netlink v1.3.1 ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl -# github.com/vishvananda/netns v0.0.4 +# github.com/vishvananda/netns v0.0.5 ## explicit; go 1.17 github.com/vishvananda/netns # github.com/x448/float16 v0.8.4 diff --git a/mkdocs.yml b/mkdocs.yml index 7782dd7734..45f5a277e0 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -154,5 +154,6 @@ nav: - User Defined Networks: okeps/okep-5193-user-defined-networks.md - Preconfigured UDN Addresses: okeps/okep-5233-preconfigured-udn-addresses.md - BGP: okeps/okep-5296-bgp.md + - Layer2TransitRouter: okeps/okep-5094-layer2-transit-router.md - Blog: - blog/index.md diff --git a/test/e2e/deploymentconfig/config.go b/test/e2e/deploymentconfig/config.go index 27c788f1b0..8675335453 100644 --- a/test/e2e/deploymentconfig/config.go +++ b/test/e2e/deploymentconfig/config.go @@ -1,20 +1,25 @@ package deploymentconfig import ( + "fmt" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig/api" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig/configs/kind" + + "k8s.io/client-go/rest" ) var deployment api.DeploymentConfig -func Set() { +func Set(_ *rest.Config) error { // upstream currently uses KinD as its preferred platform infra, so if we detect KinD, its upstream if kind.IsKind() { 
deployment = kind.New() } if deployment == nil { - panic("failed to determine the deployment config") + return fmt.Errorf("failed to determine the deployment config") } + return nil } func Get() api.DeploymentConfig { diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 3a1923efce..87323cdb2b 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -572,7 +572,7 @@ func IsGatewayModeLocal(cs kubernetes.Interface) bool { // restartOVNKubeNodePod restarts the ovnkube-node pod from namespace, running on nodeName func restartOVNKubeNodePod(clientset kubernetes.Interface, namespace string, nodeName string) error { ovnKubeNodePods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "name=ovnkube-node", + LabelSelector: "app=ovnkube-node", FieldSelector: "spec.nodeName=" + nodeName, }) if err != nil { @@ -591,7 +591,7 @@ func restartOVNKubeNodePod(clientset kubernetes.Interface, namespace string, nod framework.Logf("waiting for node %s to have running ovnkube-node pod", nodeName) err = wait.Poll(2*time.Second, 3*time.Minute, func() (bool, error) { ovnKubeNodePods, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "name=ovnkube-node", + LabelSelector: "app=ovnkube-node", FieldSelector: "spec.nodeName=" + nodeName, }) if err != nil { @@ -632,7 +632,7 @@ func restartOVNKubeNodePodsInParallel(clientset kubernetes.Interface, namespace // getOVNKubePodLogsFiltered retrieves logs from ovnkube-node pods and filters logs lines according to filteringRegexp func getOVNKubePodLogsFiltered(clientset kubernetes.Interface, namespace, nodeName, filteringRegexp string) (string, error) { ovnKubeNodePods, err := clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: "name=ovnkube-node", + LabelSelector: "app=ovnkube-node", FieldSelector: "spec.nodeName=" + nodeName, }) if err != nil { @@ -1298,7 +1298,7 @@ var _ = ginkgo.Describe("e2e ingress traffic 
validation", func() { framework.ExpectNoError(err) ginkgo.By("Waiting for the endpoints to pop up") - expectedEndpointsNum := len(endPoints) + expectedEndpointsNum := len(endPoints) if isDualStack { expectedEndpointsNum = expectedEndpointsNum * 2 } @@ -1498,7 +1498,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err) ginkgo.By("Waiting for the endpoints to pop up") - expectedEndpointsNum := len(endPoints) + expectedEndpointsNum := len(endPoints) if isDualStack { expectedEndpointsNum = expectedEndpointsNum * 2 } @@ -1586,7 +1586,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err) ginkgo.By("Waiting for the endpoints to pop up") - expectedEndpointsNum := len(endPoints) + expectedEndpointsNum := len(endPoints) if isDualStack { expectedEndpointsNum = expectedEndpointsNum * 2 } @@ -1737,7 +1737,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err) ginkgo.By("Waiting for the endpoints to pop up") - expectedEndpointsNum := len(endPoints) + expectedEndpointsNum := len(endPoints) if isDualStack { expectedEndpointsNum = expectedEndpointsNum * 2 } @@ -1874,7 +1874,7 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation", nodeTCPPort, nodeUDPPort := nodePortsFromService(np) ginkgo.By("Waiting for the endpoints to pop up") - expectedEndpointsNum := len(endPoints) + expectedEndpointsNum := len(endPoints) if isDualStack { expectedEndpointsNum = expectedEndpointsNum * 2 } @@ -2015,7 +2015,7 @@ var _ = ginkgo.Describe("e2e br-int flow monitoring export validation", func() { setUnsetTemplateContainerEnv(f.ClientSet, ovnKubeNamespace, "daemonset/ovnkube-node", getNodeContainerName(), nil, ovnEnvVar) ovnKubeNodePods, err := f.ClientSet.CoreV1().Pods(ovnKubeNamespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "name=ovnkube-node", + LabelSelector: "app=ovnkube-node", }) if err != nil { 
framework.Failf("could not get ovnkube-node pods: %v", err) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index d96b488297..c6f32f5794 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -44,7 +44,8 @@ var _ = ginkgo.BeforeSuite(func() { framework.ExpectNoError(err) err = infraprovider.Set(config) framework.ExpectNoError(err, "must configure infrastructure provider") - deploymentconfig.Set() + err = deploymentconfig.Set(config) + framework.ExpectNoError(err, "must detect deployment configuration") client, err := clientset.NewForConfig(config) framework.ExpectNoError(err, "k8 clientset is required to list nodes") err = ipalloc.InitPrimaryIPAllocator(client.CoreV1().Nodes()) diff --git a/test/e2e/egressip.go b/test/e2e/egressip.go index 0b782fc97c..b0264ee433 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -1537,7 +1537,7 @@ spec: dbPods, err := e2ekubectl.RunKubectl(ovnKubernetesNamespace, "get", "pods", "-l", "name=ovnkube-db", "-o=jsonpath='{.items..metadata.name}'") dbContainerName := "nb-ovsdb" if isInterconnectEnabled() { - dbPods, err = e2ekubectl.RunKubectl(ovnKubernetesNamespace, "get", "pods", "-l", "name=ovnkube-node", "--field-selector", fmt.Sprintf("spec.nodeName=%s", egress1Node.name), "-o=jsonpath='{.items..metadata.name}'") + dbPods, err = e2ekubectl.RunKubectl(ovnKubernetesNamespace, "get", "pods", "-l", "app=ovnkube-node", "--field-selector", fmt.Sprintf("spec.nodeName=%s", egress1Node.name), "-o=jsonpath='{.items..metadata.name}'") } if err != nil || len(dbPods) == 0 { framework.Failf("Error: Check the OVN DB to ensure no SNATs are added for the standby egressIP, err: %v", err) @@ -1651,7 +1651,7 @@ spec: framework.ExpectNoError(err, "Step 14. 
Ensure egressIP1 from egressIP object1 and egressIP3 from object2 is correctly transferred to egress2Node, failed: %v", err) if isInterconnectEnabled() { - dbPods, err = e2ekubectl.RunKubectl(ovnKubernetesNamespace, "get", "pods", "-l", "name=ovnkube-node", "--field-selector", fmt.Sprintf("spec.nodeName=%s", egress2Node.name), "-o=jsonpath='{.items..metadata.name}'") + dbPods, err = e2ekubectl.RunKubectl(ovnKubernetesNamespace, "get", "pods", "-l", "app=ovnkube-node", "--field-selector", fmt.Sprintf("spec.nodeName=%s", egress2Node.name), "-o=jsonpath='{.items..metadata.name}'") } if err != nil || len(dbPods) == 0 { framework.Failf("Error: Check the OVN DB to ensure no SNATs are added for the standby egressIP, err: %v", err) diff --git a/test/e2e/feature/features.go b/test/e2e/feature/features.go index e7c3920477..a11c8bbe8e 100644 --- a/test/e2e/feature/features.go +++ b/test/e2e/feature/features.go @@ -25,6 +25,7 @@ var ( OVSCPUPin = New("OVSCPUPin") RouteAdvertisements = New("RouteAdvertisements") Unidle = New("Unidle") + NetworkQos = New("NetworkQos") ) func New(name string) ginkgo.Labels { diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 678b9d5068..5c30e050a3 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -80,7 +80,7 @@ require ( github.com/josharian/native v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 // indirect - github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc // indirect + github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3 // indirect github.com/karrick/godirwalk v1.17.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -116,15 +116,15 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - 
github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.14.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/testify v1.10.0 // indirect github.com/urfave/cli/v2 v2.27.2 // indirect github.com/vincent-petithory/dataurl v1.0.0 // indirect - github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa // indirect - github.com/vishvananda/netns v0.0.4 // indirect + github.com/vishvananda/netlink v1.3.1 // indirect + github.com/vishvananda/netns v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 4db2592b5a..d55bee2459 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -5,38 +5,23 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod 
h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod 
h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= @@ -79,8 +64,6 @@ github.com/clarketm/json v1.17.1 h1:U1IxjqJkJ7bRK4L6dyphmoO840P6bdhPdbbLySourqI= github.com/clarketm/json v1.17.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= @@ -137,8 +120,6 @@ github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= @@ -208,14 +189,11 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -223,7 +201,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf 
v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -241,11 +218,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= @@ -259,18 +231,11 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4= github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= @@ -283,7 +248,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod 
h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= @@ -319,15 +283,14 @@ github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzA github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 h1:z4P744DR+PIpkjwXSEc6TvN3L6LVzmUquFgmNm8wSUc= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= -github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc h1:v6+jUd70AayPbIRgTYUNpnBLG5cBPTY0+10y80CZeMk= -github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k= +github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3 h1:uSGOz0UYNPduUVXLdAthKdRjIaaCUxN8j9R30Kx0JxQ= +github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20250818105516-24ab680f94f3/go.mod h1:UnAcraX3CxamBrn9H/xCLngKOquy5DyGWiupn05x9Ag= github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod 
h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -429,7 +392,6 @@ github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -452,8 +414,8 @@ github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91/go.mod h1:qIWCT github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod 
h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -484,17 +446,15 @@ github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= -github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa h1:iAhToRwOrdk+pKzclvLM7nKZhsg8f7dVrgkFccDUbUw= -github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= -github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= +github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark 
v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -505,8 +465,6 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= @@ -543,9 +501,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -572,7 +528,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -581,8 +536,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -597,7 +550,6 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -607,25 +559,14 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -637,10 +578,6 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -649,10 +586,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= @@ -681,26 +615,14 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -731,7 +653,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -773,28 +694,11 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= @@ -815,23 +719,12 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -849,26 +742,9 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= @@ -881,14 +757,7 @@ google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -900,7 +769,6 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= @@ -946,7 +814,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.22.7/go.mod h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= diff --git a/test/e2e/images/images.go b/test/e2e/images/images.go index 472705dddd..bd62992901 100644 --- a/test/e2e/images/images.go +++ b/test/e2e/images/images.go @@ -1,10 +1,21 @@ package images -import "os" +import ( + "os" + + "k8s.io/kubernetes/test/utils/image" +) var ( - agnHost = "registry.k8s.io/e2e-test-images/agnhost:2.53" - iperf3 = "quay.io/sronanrh/iperf:latest" + // We limit the set of images used by e2e to reduce duplication and to allow us to provide offline mirroring of images + // for customers and restricted test environments. + // Ideally, every image used in e2e must be part of this package. + // New test images should ideally be sourced from the upstream k8s.io/kubernetes/test/utils/image package. + // Failing to find an image from upstream k8s, please get community approval because downstream consumers must + // pre-approve new images. 
+ agnHost = image.GetE2EImage(image.Agnhost) + // FIXME: iperf3 image should not be retrieved from a users repo and should not have latest tag + iperf3 = "quay.io/sronanrh/iperf:latest" ) func init() { diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index 507cc6d086..2dfaf5e2e2 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -1548,9 +1548,9 @@ fi cudn *udnv1.ClusterUserDefinedNetwork vm *kubevirtv1.VirtualMachine vmi *kubevirtv1.VirtualMachineInstance - cidrIPv4 = "10.128.0.0/24" + cidrIPv4 = "172.31.0.0/24" // subnet in private range 172.16.0.0/12 (rfc1918) cidrIPv6 = "2010:100:200::0/60" - staticIPv4 = "10.128.0.101" + staticIPv4 = "172.31.0.101" staticIPv6 = "2010:100:200::101" staticMAC = "02:00:00:00:00:01" restart = testCommand{ @@ -2094,7 +2094,7 @@ ip route add %[3]s via %[4]s Context("with kubevirt VM using layer2 UDPN", Ordered, func() { var ( podName = "virt-launcher-vm1" - cidrIPv4 = "10.128.0.0/24" + cidrIPv4 = "172.31.0.0/24" cidrIPv6 = "2010:100:200::/60" primaryUDNNetworkStatus nadapi.NetworkStatus virtLauncherCommand = func(command string) (string, error) { @@ -2232,9 +2232,9 @@ ip route add %[3]s via %[4]s Expect(removeImagesInNodes(kubevirt.FedoraContainerDiskImage)).To(Succeed()) }) var ( - ipv4CIDR = "10.128.0.0/24" + ipv4CIDR = "172.31.0.0/24" ipv6CIDR = "2010:100:200::0/60" - vmiIPv4 = "10.128.0.100/24" + vmiIPv4 = "172.31.0.100/24" vmiIPv6 = "2010:100:200::100/60" vmiMAC = "0A:58:0A:80:00:64" staticIPsNetworkData = func(ips []string) (string, error) { @@ -2359,4 +2359,121 @@ chpasswd: { expire: False } ) }) + Context("duplicate IP validation", func() { + var ( + cudn *udnv1.ClusterUserDefinedNetwork + duplicateIPv4 = "10.128.0.200" // Static IP that will be used by both VMs + duplicateIPv6 = "2010:100:200::200" + cidrIPv4 = "10.128.0.0/24" + cidrIPv6 = "2010:100:200::0/60" + ) + + BeforeEach(func() { + if !isPreConfiguredUdnAddressesEnabled() { + Skip("ENABLE_PRE_CONF_UDN_ADDR not configured") + } + + l := 
map[string]string{ + "e2e-framework": fr.BaseName, + RequiredUDNNamespaceLabel: "", + } + ns, err := fr.CreateNamespace(context.TODO(), fr.BaseName, l) + Expect(err).NotTo(HaveOccurred()) + fr.Namespace = ns + namespace = fr.Namespace.Name + + dualCIDRs := filterDualStackCIDRs(fr.ClientSet, []udnv1.CIDR{udnv1.CIDR(cidrIPv4), udnv1.CIDR(cidrIPv6)}) + cudn, _ = kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLayer2, udnv1.NetworkRolePrimary, dualCIDRs) + createCUDN(cudn) + }) + + createVMWithStaticIP := func(vmName string, staticIPs []string) *kubevirtv1.VirtualMachine { + annotations, err := kubevirt.GenerateAddressesAnnotations("net1", staticIPs) + Expect(err).NotTo(HaveOccurred()) + + vm := fedoraWithTestToolingVM( + nil, // labels + annotations, // annotations with static IP + nil, // nodeSelector + kubevirtv1.NetworkSource{ + Pod: &kubevirtv1.PodNetwork{}, + }, + `#cloud-config +password: fedora +chpasswd: { expire: False } +`, + `version: 2 +ethernets: + eth0: + dhcp4: true + dhcp6: true + ipv6-address-generation: eui64`, + ) + vm.Name = vmName + vm.Namespace = namespace + vm.Spec.Template.Spec.Domain.Devices.Interfaces[0].Bridge = nil + vm.Spec.Template.Spec.Domain.Devices.Interfaces[0].Binding = &kubevirtv1.PluginBinding{Name: "l2bridge"} + return vm + } + + waitForVMReadinessAndVerifyIPs := func(vmName string, expectedIPs []string) { + vmi := &kubevirtv1.VirtualMachineInstance{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: vmName, + }, + } + waitVirtualMachineInstanceReadiness(vmi) + Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + expectedNumberOfAddresses := len(filterDualStackCIDRs(fr.ClientSet, []udnv1.CIDR{udnv1.CIDR(cidrIPv4), udnv1.CIDR(cidrIPv6)})) + actualAddresses := virtualMachineAddressesFromStatus(vmi, expectedNumberOfAddresses) + Expect(actualAddresses).To(ConsistOf(expectedIPs), fmt.Sprintf("VM %s should get the requested static IPs", vmName)) + } + + 
waitForVMIPodDuplicateIPFailure := func(vmName string) { + Eventually(func() []corev1.Event { + podList, err := fr.ClientSet.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", kubevirtv1.VirtualMachineNameLabel, vmName), + }) + if err != nil || len(podList.Items) == 0 { + return nil + } + + events, err := fr.ClientSet.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.name=%s", podList.Items[0].Name), + }) + if err != nil { + return nil + } + + return events.Items + }). + WithTimeout(60*time.Second). + WithPolling(2*time.Second). + Should(ContainElement(SatisfyAll( + HaveField("Type", Equal("Warning")), + HaveField("Message", ContainSubstring("provided IP is already allocated")), + )), fmt.Sprintf("VM %s should fail with IP allocation error", vmName)) + } + + It("should fail when creating second VM with duplicate static IP", func() { + staticIPs := filterIPs(fr.ClientSet, duplicateIPv4, duplicateIPv6) + + By("Creating first VM with static IP") + vm1 := createVMWithStaticIP("test-vm-1", staticIPs) + createVirtualMachine(vm1) + waitForVMReadinessAndVerifyIPs(vm1.Name, staticIPs) + + By("Creating second VM with duplicate static IP - should fail") + vm2 := createVMWithStaticIP("test-vm-2", staticIPs) + createVirtualMachine(vm2) + + By("Verifying pod fails with duplicate IP allocation error") + waitForVMIPodDuplicateIPFailure(vm2.Name) + + By("Verifying first VM is still running normally") + waitForVMReadinessAndVerifyIPs(vm1.Name, staticIPs) + }) + }) }) diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 66d56e363a..5c09665f2a 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -4,11 +4,9 @@ import ( "context" "errors" "fmt" - "net/netip" "strings" "time" - "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" @@ -25,8 +23,6 @@ import ( nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" - ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" - util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" @@ -36,15 +32,21 @@ import ( const ( PolicyForAnnotation = "k8s.v1.cni.cncf.io/policy-for" nodeHostnameKey = "kubernetes.io/hostname" + + externalNetworkSubnetV4 = "172.20.0.0/16" + externalNetworkSubnetV6 = "fd00:20::/64" + + fromHostSubnet = "from-host-subnet" // the test will generate an IP from the host subnet + fromExternalNetwork = "from-external-network" // the test will generate an IP from a subnet that the cluster is not aware of ) var _ = Describe("Multi Homing", feature.MultiHoming, func() { const ( podName = "tinypod" - secondaryNetworkCIDR = "10.128.0.0/16" + secondaryNetworkCIDR = "172.31.0.0/16" // last subnet in private range 172.16.0.0/12 (rfc1918) secondaryNetworkName = "tenant-blue" - secondaryFlatL2IgnoreCIDR = "10.128.0.0/29" - secondaryFlatL2NetworkCIDR = "10.128.0.0/24" + secondaryFlatL2IgnoreCIDR = "172.31.0.0/29" + secondaryFlatL2NetworkCIDR = "172.31.0.0/24" secondaryLocalnetIgnoreCIDR = "60.128.0.0/29" secondaryLocalnetNetworkCIDR = "60.128.0.0/24" netPrefixLengthPerNode = 24 @@ -73,7 +75,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }) Context("A single pod with an OVN-K secondary network", func() { - ginkgo.DescribeTable("is able to get to the Running phase", func(netConfigParams networkAttachmentConfigParams, podConfig podConfiguration) { + DescribeTable("is 
able to get to the Running phase", func(netConfigParams networkAttachmentConfigParams, podConfig podConfiguration) { netConfig := newNetworkAttachmentConfig(netConfigParams) netConfig.namespace = f.Namespace.Name @@ -124,7 +126,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { } } }, - ginkgo.Entry( + Entry( "when attaching to an L3 - routed - network", networkAttachmentConfigParams{ cidr: netCIDR(secondaryNetworkCIDR, netPrefixLengthPerNode), @@ -136,7 +138,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L3 - routed - network with IPv6 network", networkAttachmentConfigParams{ cidr: netCIDR(secondaryIPv6CIDR, netPrefixLengthIPv6PerNode), @@ -148,7 +150,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L2 - switched - network", networkAttachmentConfigParams{ cidr: secondaryFlatL2NetworkCIDR, @@ -160,7 +162,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L2 - switched - network featuring `excludeCIDR`s", networkAttachmentConfigParams{ cidr: secondaryFlatL2NetworkCIDR, @@ -173,7 +175,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L2 - switched - network without IPAM", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -184,7 +186,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L2 - switched - network with an IPv6 subnet", networkAttachmentConfigParams{ cidr: secondaryIPv6CIDR, @@ -196,7 +198,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L2 - switched - network with a dual stack configuration", 
networkAttachmentConfigParams{ cidr: strings.Join([]string{secondaryFlatL2NetworkCIDR, secondaryIPv6CIDR}, ","), @@ -208,7 +210,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to a localnet - switched - network", networkAttachmentConfigParams{ cidr: secondaryLocalnetNetworkCIDR, @@ -221,7 +223,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to a localnet - switched - network featuring `excludeCIDR`s", networkAttachmentConfigParams{ cidr: secondaryLocalnetNetworkCIDR, @@ -235,7 +237,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to a localnet - switched - network without IPAM", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -247,7 +249,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to a localnet - switched - network with an IPv6 subnet", networkAttachmentConfigParams{ cidr: secondaryIPv6CIDR, @@ -260,7 +262,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { name: podName, }, ), - ginkgo.Entry( + Entry( "when attaching to an L2 - switched - network with a dual stack configuration", networkAttachmentConfigParams{ cidr: strings.Join([]string{secondaryLocalnetNetworkCIDR, secondaryIPv6CIDR}, ","), @@ -276,19 +278,19 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { ) const ( - clientPodName = "client-pod" - clientIPOffset = 100 - serverIPOffset = 102 - port = 9000 + clientPodName = "client-pod" + clientIPOffset = 100 // offset for IP generation from a given subnet for client pod + serverIPOffset = 102 + externalRouterIPOffset = 55 + port = 9000 ) - ginkgo.DescribeTable("attached to a localnet network mapped to external primary interface bridge", //nolint:lll - + DescribeTable("attached to a localnet 
network mapped to external primary interface bridge", //nolint:lll func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration, isCollocatedPods bool) { - By("Get two scheduable nodes and ensure client and server are located on distinct Nodes") + By("Get two schedulable nodes and ensure client and server are located on distinct Nodes") nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), f.ClientSet, 2) - framework.ExpectNoError(err, "2 scheduable nodes are required") - Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "cluster should have at least 2 nodes") + framework.ExpectNoError(err, "2 schedulable nodes are required") + Expect(len(nodes.Items)).To(BeNumerically(">", 1), "cluster should have at least 2 nodes") if isCollocatedPods { clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} @@ -296,11 +298,9 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} } - netConfig := newNetworkAttachmentConfig(networkAttachmentConfigParams{ - name: secondaryNetworkName, - namespace: f.Namespace.Name, - topology: "localnet", - }) + + netConfigParams.namespace = f.Namespace.Name + netConfig := newNetworkAttachmentConfig(netConfigParams) if clientPodConfig.namespace == "" { clientPodConfig.namespace = f.Namespace.Name } @@ -323,13 +323,13 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { ) Expect(err).NotTo(HaveOccurred()) - if serverPodConfig.attachments != nil && serverPodConfig.needsIPRequestFromHostSubnet { + if len(serverPodConfig.attachments) > 0 && serverPodConfig.ipRequestFromSubnet != "" { By("finalizing the server pod IP configuration") err = 
addIPRequestToPodConfig(cs, &serverPodConfig, serverIPOffset) Expect(err).NotTo(HaveOccurred()) } - if clientPodConfig.attachments != nil && clientPodConfig.needsIPRequestFromHostSubnet { + if len(clientPodConfig.attachments) > 0 && clientPodConfig.ipRequestFromSubnet != "" { By("finalizing the client pod IP configuration") err = addIPRequestToPodConfig(cs, &clientPodConfig, clientIPOffset) Expect(err).NotTo(HaveOccurred()) @@ -339,27 +339,46 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { serverPod := kickstartPod(cs, serverPodConfig) By("instantiating the client pod") - kickstartPod(cs, clientPodConfig) + clientPod := kickstartPod(cs, clientPodConfig) + + serverInterface, err := getNetworkInterfaceName(serverPod, serverPodConfig, netConfig.name) + Expect(err).NotTo(HaveOccurred(), "failed to extract server pod interface name") + + clientInterface, err := getNetworkInterfaceName(clientPod, clientPodConfig, netConfig.name) + Expect(err).NotTo(HaveOccurred(), "failed to extract client pod interface name") + + // Add external container that will act as external router for the localnet + if (clientPodConfig.usesExternalRouter && len(clientPodConfig.attachments) > 0) || + (serverPodConfig.usesExternalRouter && len(serverPodConfig.attachments) > 0) { + By("instantiating the external container") + externalRouterName, err := createExternalRouter(providerCtx, cs, f, netConfig.vlanID, externalRouterIPOffset) + Expect(err).NotTo(HaveOccurred()) + + By("injecting routes via the external container") + err = injectStaticRoutesViaExternalContainer(f, cs, clientPodConfig, serverPodConfig, + clientInterface, serverInterface, externalRouterName, netConfig.vlanID) + Expect(err).NotTo(HaveOccurred()) + } // Check that the client pod can reach the server pod on the server localnet interface var serverIPs []string - if serverPodConfig.hostNetwork { - serverIPs, err = podIPsFromStatus(cs, serverPodConfig.namespace, serverPodConfig.name) - } else { + if 
len(serverPodConfig.attachments) > 0 { serverIPs, err = podIPsForAttachment(cs, serverPod.Namespace, serverPod.Name, netConfig.name) - + } else { + serverIPs, err = podIPsFromStatus(cs, serverPodConfig.namespace, serverPodConfig.name) } Expect(err).NotTo(HaveOccurred()) for _, serverIP := range serverIPs { - By(fmt.Sprintf("asserting the *client* can contact the server pod exposed endpoint: %q on port %q", serverIP, port)) curlArgs := []string{} pingArgs := []string{} - if clientPodConfig.attachments != nil { + if len(clientPodConfig.attachments) > 0 { // When the client is attached to a localnet, send probes from the localnet interface - curlArgs = []string{"--interface", "net1"} - pingArgs = []string{"-I", "net1"} + curlArgs = []string{"--interface", clientInterface} + pingArgs = []string{"-I", clientInterface} } + + By(fmt.Sprintf("asserting the *client* can contact the server pod exposed endpoint: %q on port %d", serverIP, port)) Eventually(func() error { return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port, curlArgs...) }, 2*time.Minute, 6*time.Second).Should(Succeed()) @@ -370,8 +389,34 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }, 2*time.Minute, 6*time.Second).Should(Succeed()) } }, - ginkgo.Entry( - "can be reached by a client pod in the default network on a different node", + + // The first setup we test is that of a localnet that uses IPs in the host subnet. + // Pod A is a pod in the default network, podL is a pod in a localnet. 
+ // + // +-----------------------+ + // | Kubernetes Node | + // | ovn-worker2 | + // | | + // podA (10.244.1.10/24)---+-------[ br-int ]------+--- podL (172.18.0.4/16, net1) + // (default network) | | | (localnet) + // | [ br-ex ] | + // | 172.18.0.2 | + // | | | + // +-----------|-----------+ + // | + // host network + // 172.18.0.0/16 + // | + // +------------------------------------------+ + // | other hosts / routers / services | + // | (directly reachable in 172.18.0.0/16) | + // +------------------------------------------+ + // + // We test podA when it sits on top of the overlay network, as depicted above, and + // when it is host-networked. + Entry( + // default network -> localnet, different nodes + "can be reached by a client pod in the default network on a different node, when the localnet uses an IP in the host subnet", networkAttachmentConfigParams{ name: secondaryNetworkName, topology: "localnet", @@ -384,36 +429,36 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { attachments: []nadapi.NetworkSelectionElement{{ Name: secondaryNetworkName, }}, - name: podName, - containerCmd: httpServerContainerCmd(port), - needsIPRequestFromHostSubnet: true, // will override attachments above with an IPRequest + name: podName, + containerCmd: httpServerContainerCmd(port), + ipRequestFromSubnet: fromHostSubnet, // override attachments with an IPRequest from host subnet }, false, // scheduled on distinct Nodes - Label("BUG", "OCPBUGS-43004"), ), - ginkgo.Entry( - "can be reached by a client pod in the default network on the same node", + Entry( + // default network -> localnet, same node + "can be reached by a client pod in the default network on the same node, when the localnet uses an IP in the host subnet", networkAttachmentConfigParams{ name: secondaryNetworkName, topology: "localnet", }, podConfiguration{ // client on default network - name: clientPodName + "-same-node", + name: clientPodName, isPrivileged: true, }, podConfiguration{ // server 
attached to localnet secondary network attachments: []nadapi.NetworkSelectionElement{{ Name: secondaryNetworkName, }}, - name: podName, - containerCmd: httpServerContainerCmd(port), - needsIPRequestFromHostSubnet: true, + name: podName, + containerCmd: httpServerContainerCmd(port), + ipRequestFromSubnet: fromHostSubnet, }, true, // collocated on same Node - Label("BUG", "OCPBUGS-43004"), ), - ginkgo.Entry( - "can reach a host-networked pod on a different node", + Entry( + // localnet -> host network, different nodes + "can reach a host-networked pod on a different node, when the localnet uses an IP in the host subnet", networkAttachmentConfigParams{ name: secondaryNetworkName, topology: "localnet", @@ -422,20 +467,20 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { attachments: []nadapi.NetworkSelectionElement{{ Name: secondaryNetworkName, }}, - name: clientPodName, - isPrivileged: true, - needsIPRequestFromHostSubnet: true, + name: clientPodName, + isPrivileged: true, + ipRequestFromSubnet: fromHostSubnet, }, podConfiguration{ // server on default network, pod is host-networked name: podName, containerCmd: httpServerContainerCmd(port), hostNetwork: true, }, - false, // not collocated on same node - Label("STORY", "SDN-5345"), + false, // not collocated on the same node ), - ginkgo.Entry( - "can reach a host-networked pod on the same node", + Entry( + // localnet -> host network, same node + "can reach a host-networked pod on the same node, when the localnet uses an IP in the host subnet", networkAttachmentConfigParams{ name: secondaryNetworkName, topology: "localnet", @@ -444,17 +489,247 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { attachments: []nadapi.NetworkSelectionElement{{ Name: secondaryNetworkName, }}, - name: clientPodName, - isPrivileged: true, - needsIPRequestFromHostSubnet: true, + name: clientPodName, + isPrivileged: true, + ipRequestFromSubnet: fromHostSubnet, }, podConfiguration{ // server on default network, 
pod is host-networked name: podName, containerCmd: httpServerContainerCmd(port), hostNetwork: true, }, - true, // collocated on same node - Label("STORY", "SDN-5345"), + true, // collocated on the same node + ), + Entry( + // host network -> localnet, different nodes + "can be reached by a host-networked pod on a different node, when the localnet uses an IP in the host subnet", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + }, + podConfiguration{ // client is host-networked + name: clientPodName, + hostNetwork: true, + isPrivileged: true, + }, + podConfiguration{ // server on localnet + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + containerCmd: httpServerContainerCmd(port), + name: podName, + ipRequestFromSubnet: fromHostSubnet, + }, + false, // collocated on different nodes + ), + Entry( + // host network -> localnet, same node + "can be reached by a host-networked pod on the same node, when the localnet uses an IP in the host subnet", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + }, + podConfiguration{ // client is host-networked + name: clientPodName, + hostNetwork: true, + isPrivileged: true, + }, + podConfiguration{ // server on localnet + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + containerCmd: httpServerContainerCmd(port), + name: podName, + ipRequestFromSubnet: fromHostSubnet, + }, + true, // collocated on the same node + ), + // The second setup we test configures: a localnet that uses a VLAN, an external router + // that acts as gateway for the localnet pod for traffic destined to the host network. + // We implement the external router as an external container, where we create a VLAN interface + // on top of eth0 and assign to it an IP in the subnet in use by the localnet. + // Pod A is a pod in the default network, podL is a pod in a localnet. 
+ // + // +-----------------------+ + // | Kubernetes Node | + // | ovn-worker2 | + // | | + // podA (10.244.1.10/24)---+-------[ br-int ]------+--- podL (172.20.0.4/16, net1) + // (default net) | | | (localnet, VLAN 10) + // | [ br-ex ] | + // | 172.18.0.2 | + // +-----------|-----------+ + // | + // host network + // 172.18.0.0/16 + // | + // +------------------------+ + // | external router | + // | | + // | eth0: 172.18.x.x | + // | eth0.10: 172.20.0.55 | + // +------------------------+ + // + // Packet path (ping podA → podL): + // podA (10.244.1.10) + // → br-int + // → br-ex (172.18.0.2, SNAT to node IP) + // → eth0 (external router, 172.18.x.x) + // → eth0.10 (external router, 172.20.0.55) + // → eth0 (external router) + // → br-ex (172.18.0.2) + // → br-int + // → podL (172.20.0.4) + // + // Reply traffic follows the reverse path. + + Entry( + // default network -> localnet, different nodes + "can be reached by a client pod in the default network on a different node, when the localnet uses a VLAN and an external router", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + vlanID: localnetVLANID, + }, + podConfiguration{ // client on default network + name: clientPodName, + isPrivileged: true, + }, + podConfiguration{ // server attached to localnet secondary network + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: podName, + containerCmd: httpServerContainerCmd(port), + ipRequestFromSubnet: fromExternalNetwork, + isPrivileged: true, + usesExternalRouter: true, + }, + false, // scheduled on distinct Nodes + ), + Entry( + // default network -> localnet, same node + "can be reached by a client pod in the default network on the same node, when the localnet uses a VLAN and an external router", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + vlanID: localnetVLANID, + }, + podConfiguration{ // client on default network + name: clientPodName, + 
isPrivileged: true, + }, + podConfiguration{ // server attached to localnet secondary network + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: podName, + containerCmd: httpServerContainerCmd(port), + ipRequestFromSubnet: fromExternalNetwork, + isPrivileged: true, + usesExternalRouter: true, + }, + true, // scheduled on the same node + ), + Entry( + // host network -> localnet, different nodes + "can be reached by a host-networked pod on a different node, when the localnet uses a VLAN and an external router", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + vlanID: localnetVLANID, + }, + podConfiguration{ // client on host network + name: clientPodName, + hostNetwork: true, + isPrivileged: true, + }, + podConfiguration{ // server attached to localnet secondary network + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: podName, + containerCmd: httpServerContainerCmd(port), + ipRequestFromSubnet: fromExternalNetwork, + isPrivileged: true, + usesExternalRouter: true, + }, + false, // scheduled on distinct Nodes + ), + Entry( + // host network -> localnet, same node + "can be reached by a host-networked pod on the same node, when the localnet uses a VLAN and an external router", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + vlanID: localnetVLANID, + }, + podConfiguration{ // client on host network + name: clientPodName, + hostNetwork: true, + isPrivileged: true, + }, + podConfiguration{ // server attached to localnet secondary network + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: podName, + containerCmd: httpServerContainerCmd(port), + ipRequestFromSubnet: fromExternalNetwork, + isPrivileged: true, + usesExternalRouter: true, + }, + true, // scheduled on the same node + ), + Entry( + // localnet -> host network, different nodes + "can reach a host-network 
pod on a different node, when the localnet uses a VLAN and an external router", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + vlanID: localnetVLANID, + }, + podConfiguration{ // client attached to localnet secondary network + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: clientPodName, + ipRequestFromSubnet: fromExternalNetwork, + isPrivileged: true, + usesExternalRouter: true, + }, + podConfiguration{ // server on host network + name: podName, + containerCmd: httpServerContainerCmd(port), + hostNetwork: true, + isPrivileged: true, + }, + false, // scheduled on distinct Nodes + ), + Entry( + // localnet -> host network, same node + "can reach a host-network pod on the same node, when the localnet uses a VLAN and an external router", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + vlanID: localnetVLANID, + }, + podConfiguration{ // client attached to localnet secondary network + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: clientPodName, + ipRequestFromSubnet: fromExternalNetwork, + isPrivileged: true, + usesExternalRouter: true, + }, + podConfiguration{ // server on host network + name: podName, + containerCmd: httpServerContainerCmd(port), + hostNetwork: true, + isPrivileged: true, + }, + true, // scheduled on the same node ), ) }) @@ -468,7 +743,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { staticServerIP = "192.168.200.20/24" ) - ginkgo.It("eventually configures pods that were added to an already existing network before the nad", func() { + It("eventually configures pods that were added to an already existing network before the nad", func() { netConfig := newNetworkAttachmentConfig(networkAttachmentConfigParams{ name: secondaryNetworkName, namespace: f.Namespace.Name, @@ -537,7 +812,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }, 2*time.Minute, 
6*time.Second).Should(Equal(v1.PodRunning)) }) - ginkgo.DescribeTable( + DescribeTable( "can communicate over the secondary network", func(netConfigParams networkAttachmentConfigParams, clientPodConfig podConfiguration, serverPodConfig podConfiguration) { netConfig := newNetworkAttachmentConfig(netConfigParams) @@ -561,10 +836,10 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { ) Expect(err).NotTo(HaveOccurred()) - By("Get two scheduable nodes and schedule client and server to be on distinct Nodes") + By("Get two schedulable nodes and schedule client and server to be on distinct Nodes") nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), f.ClientSet, 2) - framework.ExpectNoError(err, "2 scheduable nodes are required") - Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "cluster should have at least 2 nodes") + framework.ExpectNoError(err, "2 schedulable nodes are required") + Expect(len(nodes.Items)).To(BeNumerically(">", 1), "cluster should have at least 2 nodes") clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} @@ -636,7 +911,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { }, 2*time.Minute, 6*time.Second).Should(Succeed()) } }, - ginkgo.Entry( + Entry( "can communicate over an L2 secondary network when the pods are scheduled in different nodes", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -653,7 +928,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over an L2 - switched - secondary network with `excludeCIDR`s", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -671,7 +946,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate 
over an L3 - routed - secondary network", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -688,7 +963,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over an L3 - routed - secondary network with IPv6 subnet", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -705,7 +980,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over an L3 - routed - secondary network with a dual stack configuration", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -722,7 +997,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over an L2 - switched - secondary network without IPAM", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -740,7 +1015,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { isPrivileged: true, }, ), - ginkgo.Entry( + Entry( "can communicate over an L2 secondary network without IPAM, with static IPs configured via network selection elements", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -762,7 +1037,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over an L2 secondary network with an IPv6 subnet when pods are scheduled in different nodes", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -779,7 +1054,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over an L2 secondary network with a dual stack configuration", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -796,7 +1071,7 @@ var _ = Describe("Multi Homing", 
feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over a localnet secondary network when the pods are scheduled on different nodes", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -814,7 +1089,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over a localnet secondary network without IPAM when the pods are scheduled on different nodes", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -833,7 +1108,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { isPrivileged: true, }, ), - ginkgo.Entry( + Entry( "can communicate over a localnet secondary network without IPAM when the pods are scheduled on different nodes, with static IPs configured via network selection elements", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -856,7 +1131,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over a localnet secondary network with an IPv6 subnet when pods are scheduled on different nodes", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -874,7 +1149,7 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { containerCmd: httpServerContainerCmd(port), }, ), - ginkgo.Entry( + Entry( "can communicate over a localnet secondary network with a dual stack configuration when pods are scheduled on different nodes", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1227,7 +1502,6 @@ var _ = Describe("Multi Homing", feature.MultiHoming, func() { nil, nil, ), - Label("BUG", "OCPBUGS-25928"), ), Entry( "ingress denyall, egress allow all, ingress policy should have no impact on egress", @@ -1393,7 +1667,7 @@ ip a add %[4]s/24 dev %[2]s }, 2*time.Minute, 5*time.Second).Should(BeTrue()) }) - ginkgo.DescribeTable( + 
DescribeTable( "configure traffic allow lists", func(netConfigParams networkAttachmentConfigParams, allowedClientPodConfig podConfiguration, blockedClientPodConfig podConfiguration, serverPodConfig podConfiguration, policy *mnpapi.MultiNetworkPolicy) { netConfig := newNetworkAttachmentConfig(netConfigParams) @@ -1434,7 +1708,7 @@ ip a add %[4]s/24 dev %[2]s By("asserting the *blocked-client* pod **cannot** contact the server pod exposed endpoint") Expect(connectToServer(blockedClientPodConfig, serverIP, port)).To(MatchError(ContainSubstring("exit code 28"))) }, - ginkgo.Entry( + Entry( "using pod selectors for a pure L2 overlay", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1471,7 +1745,7 @@ ip a add %[4]s/24 dev %[2]s multiNetPolicyPort(port), ), ), - ginkgo.Entry( + Entry( "using pod selectors and port range for a pure L2 overlay", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1509,7 +1783,7 @@ ip a add %[4]s/24 dev %[2]s multiNetPolicyPortRange(port-3, port+5), ), ), - ginkgo.Entry( + Entry( "using pod selectors for a routed topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1546,7 +1820,7 @@ ip a add %[4]s/24 dev %[2]s multiNetPolicyPort(port), ), ), - ginkgo.Entry( + Entry( "using pod selectors for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1583,7 +1857,7 @@ ip a add %[4]s/24 dev %[2]s multiNetPolicyPort(port), ), ), - ginkgo.Entry( + Entry( "using IPBlock for a pure L2 overlay", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1615,7 +1889,7 @@ ip a add %[4]s/24 dev %[2]s port, ), ), - ginkgo.Entry( + Entry( "using IPBlock for a routed topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1647,7 +1921,7 @@ ip a add %[4]s/24 dev %[2]s port, ), ), - ginkgo.Entry( + Entry( "using IPBlock for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1679,7 +1953,7 @@ ip a add %[4]s/24 dev %[2]s port, ), 
), - ginkgo.Entry( + Entry( "using namespace selectors for a pure L2 overlay", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1713,7 +1987,7 @@ ip a add %[4]s/24 dev %[2]s port, ), ), - ginkgo.Entry( + Entry( "using namespace selectors for a routed topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1747,7 +2021,7 @@ ip a add %[4]s/24 dev %[2]s port, ), ), - ginkgo.Entry( + Entry( "using namespace selectors for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1782,7 +2056,7 @@ ip a add %[4]s/24 dev %[2]s ), ), - ginkgo.Entry( + Entry( "using IPBlock for an IPAMless pure L2 overlay", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1816,7 +2090,7 @@ ip a add %[4]s/24 dev %[2]s ), ) - ginkgo.DescribeTable( + DescribeTable( "allow all ingress", func(netConfigParams networkAttachmentConfigParams, clientPodConfig podConfiguration, serverPodConfig podConfiguration, policy *mnpapi.MultiNetworkPolicy) { netConfig := newNetworkAttachmentConfig(netConfigParams) @@ -1843,7 +2117,7 @@ ip a add %[4]s/24 dev %[2]s return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port) }, 2*time.Minute, 6*time.Second).Should(Succeed()) }, - ginkgo.Entry( + Entry( "using ingress allow-all for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1875,7 +2149,7 @@ ip a add %[4]s/24 dev %[2]s nil, ), ), - ginkgo.XEntry( + XEntry( "using egress deny-all for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1906,9 +2180,8 @@ ip a add %[4]s/24 dev %[2]s nil, nil, ), - Label("BUG", "OCPBUGS-25928"), ), - ginkgo.Entry( + Entry( "using egress deny-all, ingress allow-all for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -1944,7 +2217,7 @@ ip a add %[4]s/24 dev %[2]s ), ) - ginkgo.DescribeTable( + DescribeTable( "deny traffic", func(netConfigParams networkAttachmentConfigParams, 
clientPodConfig podConfiguration, serverPodConfig podConfiguration, policy *mnpapi.MultiNetworkPolicy) { netConfig := newNetworkAttachmentConfig(netConfigParams) @@ -1971,7 +2244,7 @@ ip a add %[4]s/24 dev %[2]s return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port) }, 2*time.Minute, 6*time.Second).Should(Not(Succeed())) }, - ginkgo.Entry( + Entry( "using ingress deny-all for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -2003,7 +2276,7 @@ ip a add %[4]s/24 dev %[2]s nil, ), ), - ginkgo.Entry( + Entry( "using pod selectors and wrong port range for a localnet topology", networkAttachmentConfigParams{ name: secondaryNetworkName, @@ -2113,8 +2386,12 @@ ip a add %[4]s/24 dev %[2]s func kickstartPod(cs clientset.Interface, configuration podConfiguration) *v1.Pod { podNamespacedName := fmt.Sprintf("%s/%s", configuration.namespace, configuration.name) + var ( + pod *v1.Pod + err error + ) By(fmt.Sprintf("instantiating pod %q", podNamespacedName)) - createdPod, err := cs.CoreV1().Pods(configuration.namespace).Create( + _, err = cs.CoreV1().Pods(configuration.namespace).Create( context.Background(), generatePodSpec(configuration), metav1.CreateOptions{}, @@ -2123,13 +2400,15 @@ func kickstartPod(cs clientset.Interface, configuration podConfiguration) *v1.Po By(fmt.Sprintf("asserting that pod %q reaches the `Ready` state", podNamespacedName)) EventuallyWithOffset(1, func() v1.PodPhase { - updatedPod, err := cs.CoreV1().Pods(configuration.namespace).Get(context.Background(), configuration.name, metav1.GetOptions{}) + p, err := cs.CoreV1().Pods(configuration.namespace).Get(context.Background(), configuration.name, metav1.GetOptions{}) if err != nil { return v1.PodFailed } - return updatedPod.Status.Phase + pod = p + return p.Status.Phase + }, 2*time.Minute, 6*time.Second).Should(Equal(v1.PodRunning)) - return createdPod + return pod // return the updated pod } func createNads(f *framework.Framework, nadClient 
nadclient.K8sCniCncfIoV1Interface, extraNamespace *v1.Namespace, netConfig networkAttachmentConfig) error { @@ -2180,71 +2459,46 @@ func createMultiNetworkPolicy(mnpClient mnpclient.K8sCniCncfIoV1beta1Interface, return err } -func computeIPWithOffset(baseAddr string, increment int) (string, error) { - addr, err := netip.ParsePrefix(baseAddr) +// generateIPsFromNodePrimaryNetworkAddresses returns IPv4 and IPv6 addresses at the provided offset from the primary interface network addresses found on the node +func generateIPsFromNodePrimaryNetworkAddresses(cs clientset.Interface, nodeName string, offset int) ([]string, error) { + hostSubnets, err := getHostSubnetsForNode(cs, nodeName) if err != nil { - return "", fmt.Errorf("Failed to parse CIDR %v", err) + return nil, fmt.Errorf("failed to get host subnets for node %q: %w", nodeName, err) } + return generateIPsFromSubnets(hostSubnets, offset) +} - ip := addr.Addr() - - for i := 0; i < increment; i++ { - ip = ip.Next() - if !ip.IsValid() { - return "", fmt.Errorf("overflow: IP address exceeds bounds") - } +func addIPRequestToPodConfig(cs clientset.Interface, podConfig *podConfiguration, offset int) error { + nodeName, ok := podConfig.nodeSelector[nodeHostnameKey] + if !ok { + return fmt.Errorf("missing node selector %q in podConfig for pod %s/%s", nodeHostnameKey, podConfig.namespace, podConfig.name) } - return netip.PrefixFrom(ip, addr.Bits()).String(), nil -} - -// Given a node name and an offset, generateIPsFromNodePrimaryIfAddr returns an IPv4 and an IPv6 address -// at the provided offset from the primary interface addresses found on the node. 
-func generateIPsFromNodePrimaryIfAddr(cs clientset.Interface, nodeName string, offset int) ([]string, error) { - var newAddresses []string + var ( + ipsToRequest []string + err error + ) - node, err := cs.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("Failed to get node %s: %v", nodeName, err) - } + switch podConfig.ipRequestFromSubnet { + case fromHostSubnet: + ipsToRequest, err = generateIPsFromNodePrimaryNetworkAddresses(cs, nodeName, offset) - nodeIfAddr, err := util.GetNodeIfAddrAnnotation(node) - if err != nil { - return nil, err - } - nodeAddresses := []string{} - if nodeIfAddr.IPv4 != "" { - nodeAddresses = append(nodeAddresses, nodeIfAddr.IPv4) - } - if nodeIfAddr.IPv6 != "" { - nodeAddresses = append(nodeAddresses, nodeIfAddr.IPv6) - } - for _, nodeAddress := range nodeAddresses { - ipGen, err := ipgenerator.NewIPGenerator(nodeAddress) - if err != nil { - return nil, err - } - newIP, err := ipGen.GenerateIP(offset) - if err != nil { - return nil, err + case fromExternalNetwork: + subnets := filterCIDRs(cs, externalNetworkSubnetV4, externalNetworkSubnetV6) + if len(subnets) == 0 { + return fmt.Errorf("no external network subnets available for IP family support") } - newAddresses = append(newAddresses, newIP.String()) - } - return newAddresses, nil -} + ipsToRequest, err = generateIPsFromSubnets(subnets, offset) -func addIPRequestToPodConfig(cs clientset.Interface, podConfig *podConfiguration, offset int) error { - nodeName, ok := podConfig.nodeSelector[nodeHostnameKey] - if !ok { - return fmt.Errorf("No node selector found on podConfig") + default: + return fmt.Errorf("unknown or unimplemented subnet source: %q", podConfig.ipRequestFromSubnet) } - IPsToRequest, err := generateIPsFromNodePrimaryIfAddr(cs, nodeName, offset) if err != nil { return err } for i := range podConfig.attachments { - podConfig.attachments[i].IPRequest = IPsToRequest + podConfig.attachments[i].IPRequest = 
ipsToRequest } return nil } diff --git a/test/e2e/multihoming_external_router_utils.go b/test/e2e/multihoming_external_router_utils.go new file mode 100644 index 0000000000..552fadad99 --- /dev/null +++ b/test/e2e/multihoming_external_router_utils.go @@ -0,0 +1,428 @@ +package e2e + +import ( + "context" + "fmt" + "net/netip" + "strings" + + . "github.com/onsi/ginkgo/v2" + + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/images" + "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + utilnet "k8s.io/utils/net" +) + +// buildRouteToHostSubnetViaExternalContainer returns ip route add commands to reach the host subnets via the provided gateway IPs +func buildRouteToHostSubnetViaExternalContainer(cs clientset.Interface, nodeName string, gwV4, gwV6, interfaceName string) ([]string, error) { + cmdTemplateV4 := "ip -4 route replace %s via " + gwV4 + " dev " + interfaceName + cmdTemplateV6 := "ip -6 route replace %s via " + gwV6 + " dev " + interfaceName + cmds := []string{} + hostSubnets, err := getHostSubnetsForNode(cs, nodeName) + if err != nil { + return nil, fmt.Errorf("failed to get host subnets for node %s: %w", nodeName, err) + } + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv4CIDRString(hostSubnet) && gwV4 != "" { + cmds = append(cmds, fmt.Sprintf(cmdTemplateV4, hostSubnet)) + } else if utilnet.IsIPv6CIDRString(hostSubnet) && gwV6 != "" { + cmds = append(cmds, fmt.Sprintf(cmdTemplateV6, hostSubnet)) + } + } + return cmds, nil +} + +// injectRouteViaExternalContainerIntoPod computes and applies host routes inside the given pod to reach +// the host subnets via the external container (VLAN interface), depending on +// the pod's ipRequestFromSubnet field. 
+func injectRouteViaExternalContainerIntoPod(f *framework.Framework, cs clientset.Interface, podConfig podConfiguration, + podInterfaceName, externalContainerName string, vlanID int) error { + + nodeName, ok := podConfig.nodeSelector[nodeHostnameKey] + if !ok { + return fmt.Errorf("nodeSelector should contain %s key", nodeHostnameKey) + } + + cmds := []string{} + vlanIface := fmt.Sprintf("%s.%d", "eth0", vlanID) + gwV4IPs, gwV6IPs, err := getExternalContainerInterfaceIPs(externalContainerName, vlanIface) + if err != nil { + return fmt.Errorf("failed to get external container interface IPs: %w", err) + } + if len(gwV4IPs) == 0 && len(gwV6IPs) == 0 { + return fmt.Errorf("no IPs found on VLAN interface %s of external container %s", vlanIface, externalContainerName) + } + + // Normalize IP addresses by removing CIDR notation if present + gwV4IPs, err = normalizeIPAddresses(gwV4IPs) + if err != nil { + return fmt.Errorf("failed to normalize IPv4 addresses from external container interface: %w", err) + } + gwV6IPs, err = normalizeIPAddresses(gwV6IPs) + if err != nil { + return fmt.Errorf("failed to normalize IPv6 addresses from external container interface: %w", err) + } + + // Take the first container IP as gateway. 
+ var gwIPV4, gwIPV6 string + if len(gwV4IPs) > 0 { + gwIPV4 = gwV4IPs[0] + } + + for _, ip := range gwV6IPs { + if addr, err := netip.ParseAddr(ip); err == nil && !addr.IsLinkLocalUnicast() { + gwIPV6 = ip + break + } + } + + cmds, err = buildRouteToHostSubnetViaExternalContainer(cs, nodeName, gwIPV4, gwIPV6, podInterfaceName) + if err != nil { + return fmt.Errorf("failed to build route to host subnet via external container: %w", err) + } + + for _, cmd := range cmds { + framework.Logf("Adding to pod %s/%s route to host subnet via external container %s: %s", podConfig.namespace, podConfig.name, externalContainerName, cmd) + _, stderr, err := ExecShellInPodWithFullOutput(f, podConfig.namespace, podConfig.name, cmd) + if err != nil || stderr != "" { + return fmt.Errorf("failed to add route to external container (cmd=%s): stderr=%s, err=%w\n", cmd, stderr, err) + } + } + + return nil +} + +// createExternalRouter creates an external container that acts as a router for localnet testing +func createExternalRouter(providerCtx infraapi.Context, cs clientset.Interface, f *framework.Framework, vlanID, ipOffset int) (string, error) { + // Add external container that will act as external router for the localnet + primaryProviderNetwork, err := infraprovider.Get().PrimaryNetwork() + if err != nil { + return "", fmt.Errorf("failed to get primary provider network: %w", err) + } + externalContainerName := f.Namespace.Name + "-external-router" + + routerSubnets := filterCIDRs(cs, externalNetworkSubnetV4, externalNetworkSubnetV6) + routerIPs, err := generateIPsFromSubnets(routerSubnets, ipOffset) + if err != nil { + return "", fmt.Errorf("failed to generate IP for external router: %w", err) + } + if len(routerIPs) == 0 { + return "", fmt.Errorf("no supported IP families found for the external router") + } + + // - create a VLAN interface on top of eth0. + // - assign the generated IP to the VLAN interface. + // - enable IP forwarding. + // - sleep to keep the container running. 
+ var commandBuilder strings.Builder + commandBuilder.WriteString(fmt.Sprintf("ip link add link eth0 name eth0.%d type vlan id %d; ", vlanID, vlanID)) + for _, ip := range routerIPs { + commandBuilder.WriteString(fmt.Sprintf("ip addr add %s dev eth0.%d; ", ip, vlanID)) + } + commandBuilder.WriteString(fmt.Sprintf("ip link set eth0.%d up; ", vlanID)) + commandBuilder.WriteString("sysctl -w net.ipv4.ip_forward=1; ") + commandBuilder.WriteString("sysctl -w net.ipv6.conf.all.forwarding=1; ") + commandBuilder.WriteString("sleep infinity") + + externalContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.AgnHost(), + Network: primaryProviderNetwork, + Entrypoint: "bash", + CmdArgs: []string{"-c", commandBuilder.String()}, + } + + _, err = providerCtx.CreateExternalContainer(externalContainerSpec) + if err != nil { + return "", fmt.Errorf("failed to create external router container: %w", err) + } + + return externalContainerName, nil +} + +// injectStaticRoutesViaExternalContainer configures the localnet pod to reach the host subnet and +// the hosts/OVN to reach the localnet subnet. +// We need to inject static routes in the following places: +// +// 1. on the localnet pod we need a route to reach the host subnet +// via the VLAN interface of the external container; +// +// 2. in NBDB, if the cluster is in shared gateway mode, we need a route that tells +// OVN to route traffic to the localnet subnet via the external container IPs; +// +// 3. in the host routing table, if the cluster is in local gateway mode, we need a route +// that tells the host to route traffic to the localnet subnet via the external container IPs. +// We need this also for host-networked pods to reach the localnet subnet regardless of the gateway mode. 
+func injectStaticRoutesViaExternalContainer(f *framework.Framework, cs clientset.Interface, + clientPodConfig, serverPodConfig podConfiguration, clientInterface, serverInterface, externalContainerName string, vlanID int) error { + if clientPodConfig.usesExternalRouter && len(clientPodConfig.attachments) > 0 { + if err := injectRouteViaExternalContainerIntoPod(f, cs, clientPodConfig, clientInterface, externalContainerName, vlanID); err != nil { + return fmt.Errorf("failed to add route to client pod %s/%s: %w", clientPodConfig.namespace, clientPodConfig.name, err) + } + } + + if serverPodConfig.usesExternalRouter && len(serverPodConfig.attachments) > 0 { + if err := injectRouteViaExternalContainerIntoPod(f, cs, serverPodConfig, serverInterface, externalContainerName, vlanID); err != nil { + return fmt.Errorf("failed to add route to server pod %s/%s: %w", serverPodConfig.namespace, serverPodConfig.name, err) + } + } + + if err := injectStaticRoutesIntoNodes(f, cs, externalContainerName); err != nil { + return fmt.Errorf("failed to add static routes into nodes: %w", err) + } + return nil +} + +// injectStaticRoutesIntoNodes adds routes for externalNetworkSubnetV4/V6 +// via the external container IPs on the primary provider network. 
+// The type of routes differs according to the OVNK architecture (interconnect vs centralized) +// and the gateway mode: +// | | Local GW | Shared GW | +// | IC | linux route on all node | linux routes on all nodes; OVN routes on all nodes for the local GW router | +// | non-IC | linux routes on all nodes | linux routes on all nodes; OVN routes on NBDB leader for all GW routers | +func injectStaticRoutesIntoNodes(f *framework.Framework, cs clientset.Interface, externalContainerName string) error { + framework.Logf("Injecting Linux kernel routes for host-networked pods (and for OVN pods when in local gateway mode)") + if err := injectRoutesWithCommandBuilder(f, cs, externalContainerName, hostRoutingTableCommandBuilder{}); err != nil { + return err + } + + if !IsGatewayModeLocal(cs) { + framework.Logf("Shared gateway mode: injecting OVN routes for overlay pods") + if err := injectRoutesWithCommandBuilder(f, cs, externalContainerName, ovnLogicalRouterCommandBuilder{}); err != nil { + return err + } + } + + return nil +} + +// routeCommand represents a route add/delete command for a specific gateway mode +type routeCommand struct { + addCmd []string + deleteCmd []string + logMsg string + target string // what we're adding the route to (logical router name or node name) +} + +// routeCommandBuilder defines the interface for building gateway-mode-specific route commands +type routeCommandBuilder interface { + buildRouteCommand(nodeName, cidr, nextHop string) routeCommand +} + +// ovnLogicalRouterCommandBuilder builds commands for OVN logical routes (shared gateway mode) +type ovnLogicalRouterCommandBuilder struct{} + +func (b ovnLogicalRouterCommandBuilder) buildRouteCommand(nodeName, cidr, nextHop string) routeCommand { + logicalRouterName := "GR_" + nodeName + return routeCommand{ + addCmd: []string{"ovn-nbctl", "--may-exist", "--", "lr-route-add", logicalRouterName, cidr, nextHop}, + deleteCmd: []string{"ovn-nbctl", "--if-exists", "--", "lr-route-del", 
logicalRouterName, cidr}, + logMsg: fmt.Sprintf("OVN logical router route %s via %s to %s", cidr, nextHop, logicalRouterName), + target: logicalRouterName, + } +} + +// hostRoutingTableCommandBuilder builds commands for routes on the host +type hostRoutingTableCommandBuilder struct{} + +func (b hostRoutingTableCommandBuilder) buildRouteCommand(nodeName, cidr, nextHop string) routeCommand { + return routeCommand{ + addCmd: []string{"ip", "route", "replace", cidr, "via", nextHop}, + deleteCmd: []string{"ip", "route", "del", cidr, "via", nextHop}, + logMsg: fmt.Sprintf("host route %s via %s to node %s", cidr, nextHop, nodeName), + target: nodeName, + } +} + +// getOvnKubePodsForRouteInjection determines which pods to use for route injection based on the command builder type +// and cluster configuration. +func getOvnKubePodsForRouteInjection(f *framework.Framework, cs clientset.Interface, cmdBuilder routeCommandBuilder) (*v1.PodList, error) { + ovnKubernetesNamespace := deploymentconfig.Get().OVNKubernetesNamespace() + + var podList *v1.PodList + var err error + + if _, isHostRoutingCommand := cmdBuilder.(hostRoutingTableCommandBuilder); isHostRoutingCommand { + framework.Logf("Host routing command: selecting all ovnkube-node pods") + podList, err = cs.CoreV1().Pods(ovnKubernetesNamespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "name=ovnkube-node"}) + } else if isInterconnectEnabled() { + framework.Logf("OVN command with interconnect: selecting all OVN DB pods") + podList, err = cs.CoreV1().Pods(ovnKubernetesNamespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "ovn-db-pod=true"}) + } else { + framework.Logf("OVN command without interconnect: selecting DB leader pod") + leaderPod, findErr := findOVNDBLeaderPod(f, cs, ovnKubernetesNamespace) + if findErr != nil { + return nil, fmt.Errorf("failed to find OVN DB leader pod: %w", findErr) + } + podList = &v1.PodList{Items: []v1.Pod{*leaderPod}} + } + + if err != nil { + return nil, err + } + + 
if len(podList.Items) == 0 { + return nil, fmt.Errorf("no ovnkube pods found to execute route commands") + } + + return podList, nil +} + +// getTargetNodesForRouteInjection determines which nodes should be targeted for route injection +// based on the command builder type and cluster configuration. +func getTargetNodesForRouteInjection(cs clientset.Interface, cmdBuilder routeCommandBuilder, nodeName string) ([]string, error) { + // For host routing commands, always target the current pod's node + if _, isHostRoutingCommand := cmdBuilder.(hostRoutingTableCommandBuilder); isHostRoutingCommand { + return []string{nodeName}, nil + } + + // OVN routes + if isInterconnectEnabled() { + return []string{nodeName}, nil // each pod targets its own gateway router + } + + // non-interconnect mode: DB pod targets all gateway routers + allNodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to list nodes: %w", err) + } + + var targetNodeNames []string + for _, node := range allNodes.Items { + targetNodeNames = append(targetNodeNames, node.Name) + } + return targetNodeNames, nil +} + +// routeInfo represents a route destination and next-hop pair +type routeInfo struct { + destination string + nextHop string +} + +// buildRouteList creates a list of routes based on available IP families and gateways +func buildRouteList(clientSet clientset.Interface, v4gateway, v6gateway string) ([]routeInfo, error) { + var routes []routeInfo + + if isIPv4Supported(clientSet) && v4gateway != "" { + routes = append(routes, routeInfo{ + destination: externalNetworkSubnetV4, + nextHop: v4gateway, + }) + } + + if isIPv6Supported(clientSet) && v6gateway != "" { + routes = append(routes, routeInfo{ + destination: externalNetworkSubnetV6, + nextHop: v6gateway, + }) + } + + if len(routes) == 0 { + return nil, fmt.Errorf("no routes to inject (check IP families and external container addresses)") + } + + return routes, nil +} + +// 
podExecutionContext holds pod-specific information for route execution +type podExecutionContext struct { + pod v1.Pod + containerName string + targetNodes []string + cmdBuilder routeCommandBuilder +} + +func newPodExecutionContext(cs clientset.Interface, cmdBuilder routeCommandBuilder, pod v1.Pod) (*podExecutionContext, error) { + targetNodes, err := getTargetNodesForRouteInjection(cs, cmdBuilder, pod.Spec.NodeName) + if err != nil { + return nil, fmt.Errorf("failed to get target nodes for pod %s: %w", pod.Name, err) + } + + return &podExecutionContext{ + pod: pod, + containerName: pod.Spec.Containers[0].Name, + targetNodes: targetNodes, + cmdBuilder: cmdBuilder, + }, nil +} + +func (ctx *podExecutionContext) executeAllRoutes(f *framework.Framework, routes []routeInfo) error { + for _, route := range routes { + for _, targetNode := range ctx.targetNodes { + routeCmd := ctx.cmdBuilder.buildRouteCommand(targetNode, route.destination, route.nextHop) + + if err := addRoute(f, ctx.pod.Namespace, ctx.pod.Name, ctx.containerName, routeCmd); err != nil { + return err + } + + scheduleRouteCleanup(f, ctx.pod.Namespace, ctx.pod.Name, ctx.containerName, routeCmd) + } + } + return nil +} + +func addRoute(f *framework.Framework, namespace, podName, containerName string, routeCmd routeCommand) error { + stdout, stderr, err := ExecCommandInContainerWithFullOutput(f, namespace, podName, containerName, routeCmd.addCmd...) + if err != nil || stderr != "" { + return fmt.Errorf("failed to add %s (pod=%s, container=%s): stdout=%q, stderr=%q, cmd=%v: %w", + routeCmd.logMsg, podName, containerName, stdout, stderr, routeCmd.addCmd, err) + } + framework.Logf("Successfully added %s", routeCmd.logMsg) + return nil +} + +func scheduleRouteCleanup(f *framework.Framework, namespace, podName, containerName string, routeCmd routeCommand) { + DeferCleanup(func() { + _, stderr, err := ExecCommandInContainerWithFullOutput(f, namespace, podName, containerName, routeCmd.deleteCmd...) 
+ if err != nil { + framework.Logf("Warning: Failed to delete route from %s (cmd=%s): %v, stderr: %s", + routeCmd.target, routeCmd.deleteCmd, err, stderr) + } + }) +} + +func injectRoutesWithCommandBuilder(f *framework.Framework, cs clientset.Interface, externalContainerName string, cmdBuilder routeCommandBuilder) error { + primaryProviderNetwork, err := infraprovider.Get().PrimaryNetwork() + if err != nil { + return fmt.Errorf("failed to get primary network: %w", err) + } + + v4gateway, v6gateway, err := getExternalContainerInterfaceIPsOnNetwork(externalContainerName, primaryProviderNetwork.Name()) + if err != nil { + return fmt.Errorf("failed to get external container interface IPs on provider network: %w", err) + } + + // Build list of routes to inject + routes, err := buildRouteList(f.ClientSet, v4gateway, v6gateway) + if err != nil { + return err + } + + // Get target pods for route injection + ovnkubePods, err := getOvnKubePodsForRouteInjection(f, cs, cmdBuilder) + if err != nil { + return fmt.Errorf("failed to select target pods: %w", err) + } + + // Execute route commands for each pod + for _, pod := range ovnkubePods.Items { + podCtx, err := newPodExecutionContext(cs, cmdBuilder, pod) + if err != nil { + return err + } + + if err := podCtx.executeAllRoutes(f, routes); err != nil { + return err + } + } + + return nil +} diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index 816f7cccd1..39b0fbc4ac 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -24,6 +24,8 @@ import ( mnpapi "github.com/k8snetworkplumbingwg/multi-networkpolicy/pkg/apis/k8s.cni.cncf.io/v1beta1" nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" ) func netCIDR(netCIDR string, netPrefixLengthPerNode int) string { @@ -72,7 +74,7 @@ func filterIPs(cs clientset.Interface, ips ...string) 
[]string { } func filterIPsAndJoin(cs clientset.Interface, ips string) string { - return joinStrings(filterIPs(cs, strings.Split(ips, ",")...)...) + return joinStrings(filterIPs(cs, strings.Split(ips, ",")...)...) } func getNetCIDRSubnet(netCIDR string) (string, error) { @@ -190,23 +192,37 @@ func patchNADSpec(nadClient nadclient.K8sCniCncfIoV1Interface, name, namespace s } type podConfiguration struct { - attachments []nadapi.NetworkSelectionElement - containerCmd []string - name string - namespace string - nodeSelector map[string]string - isPrivileged bool - labels map[string]string - requiresExtraNamespace bool - hostNetwork bool - needsIPRequestFromHostSubnet bool + attachments []nadapi.NetworkSelectionElement + containerCmd []string + name string + namespace string + nodeSelector map[string]string + isPrivileged bool + labels map[string]string + annotations map[string]string + requiresExtraNamespace bool + hostNetwork bool + ipRequestFromSubnet string + usesExternalRouter bool } func generatePodSpec(config podConfiguration) *v1.Pod { podSpec := e2epod.NewAgnhostPod(config.namespace, config.name, nil, nil, nil, config.containerCmd...) + + // Merge network attachments and custom annotations + if podSpec.Annotations == nil { + podSpec.Annotations = make(map[string]string) + } if len(config.attachments) > 0 { - podSpec.Annotations = networkSelectionElements(config.attachments...) + attachmentAnnotations := networkSelectionElements(config.attachments...) 
+ for k, v := range attachmentAnnotations { + podSpec.Annotations[k] = v + } + } + for k, v := range config.annotations { + podSpec.Annotations[k] = v } + podSpec.Spec.NodeSelector = config.nodeSelector podSpec.Labels = config.labels podSpec.Spec.HostNetwork = config.hostNetwork @@ -355,7 +371,7 @@ func pingServer(clientPodConfig podConfiguration, serverIP string, args ...strin clientPodConfig.name, "--", "ping", - "-c", "1", // send one ICMP echo request + "-c", "3", // send three ICMP echo requests "-W", "2", // timeout after 2 seconds if no response } baseArgs = append(baseArgs, args...) @@ -405,7 +421,11 @@ func podIPsForAttachment(k8sClient clientset.Interface, podNamespace string, pod if err != nil { return nil, err } - if len(netStatus) != 1 { + + if len(netStatus) == 0 { + return nil, fmt.Errorf("no status entry for attachment %s on pod %s", attachmentName, namespacedName(podNamespace, podName)) + } + if len(netStatus) > 1 { return nil, fmt.Errorf("more than one status entry for attachment %s on pod %s", attachmentName, namespacedName(podNamespace, podName)) } if len(netStatus[0].IPs) == 0 { @@ -768,3 +788,27 @@ func getPodAnnotationIPsForAttachmentByIndex(k8sClient clientset.Interface, podN } return ipnets[index].IP.String(), nil } + +// generateIPsFromSubnets generates IP addresses from the given subnets with the specified offset +func generateIPsFromSubnets(subnets []string, offset int) ([]string, error) { + var addrs []string + for _, s := range subnets { + s = strings.TrimSpace(s) + if s == "" { + continue + } + ipGen, err := ip.NewIPGenerator(s) + if err != nil { + return nil, err + } + ip, err := ipGen.GenerateIP(offset) + if err != nil { + return nil, err + } + addrs = append(addrs, ip.String()) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no valid subnets provided") + } + return addrs, nil +} diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index 36bcfafd26..f34bdf2a42 100644 --- 
a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -12,6 +12,7 @@ import ( udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -21,7 +22,6 @@ import ( "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -60,13 +60,13 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { nodeHostnameKey = "kubernetes.io/hostname" podClusterNetPort uint16 = 9000 podClusterNetDefaultPort uint16 = 8080 - userDefinedNetworkIPv4Subnet = "10.128.0.0/16" + userDefinedNetworkIPv4Subnet = "172.31.0.0/16" // last subnet in private range 172.16.0.0/12 (rfc1918) userDefinedNetworkIPv6Subnet = "2014:100:200::0/60" - customL2IPv4Gateway = "10.128.0.3" + customL2IPv4Gateway = "172.31.0.3" customL2IPv6Gateway = "2014:100:200::3" - customL2IPv4ReservedCIDR = "10.128.1.0/24" + customL2IPv4ReservedCIDR = "172.31.1.0/24" customL2IPv6ReservedCIDR = "2014:100:200::100/120" - customL2IPv4InfraCIDR = "10.128.0.0/30" + customL2IPv4InfraCIDR = "172.31.0.0/30" customL2IPv6InfraCIDR = "2014:100:200::/122" userDefinedNetworkName = "hogwarts" nadName = "gryffindor" @@ -719,7 +719,7 @@ var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { "with L2 primary UDN", "layer2", 4, - "10.128.0.0/29", + "172.31.0.0/29", "2014:100:200::0/125", ), // limit the number of pods to 10 @@ -2403,6 +2403,12 @@ func withLabels(labels map[string]string) podOption { } } +func withAnnotations(annotations map[string]string) podOption { + return func(pod 
*podConfiguration) { + pod.annotations = annotations + } +} + func withNetworkAttachment(networks []nadapi.NetworkSelectionElement) podOption { return func(pod *podConfiguration) { pod.attachments = networks diff --git a/test/e2e/network_segmentation_api_validations.go b/test/e2e/network_segmentation_api_validations.go index c8d0284a50..ecf0459b02 100644 --- a/test/e2e/network_segmentation_api_validations.go +++ b/test/e2e/network_segmentation_api_validations.go @@ -6,11 +6,12 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" testscenariocudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario/cudn" ) -var _ = Describe("Network Segmentation: API validations", func() { +var _ = Describe("Network Segmentation: API validations", feature.NetworkSegmentation, func() { DescribeTable("api-server should reject invalid CRs", func(scenarios []testscenario.ValidateCRScenario) { DeferCleanup(func() { diff --git a/test/e2e/network_segmentation_default_network_annotation.go b/test/e2e/network_segmentation_default_network_annotation.go index 7ef3416445..4119c03f7e 100644 --- a/test/e2e/network_segmentation_default_network_annotation.go +++ b/test/e2e/network_segmentation_default_network_annotation.go @@ -16,9 +16,10 @@ import ( udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" ) -var _ = Describe("Network Segmentation: Default network multus annotation", func() { +var _ = Describe("Network Segmentation: Default network multus annotation", feature.NetworkSegmentation, func() { var ( f = wrappedTestFramework("default-network-annotation") ) diff --git a/test/e2e/network_segmentation_endpointslices_mirror.go 
b/test/e2e/network_segmentation_endpointslices_mirror.go index c952cc73d4..83795a9afa 100644 --- a/test/e2e/network_segmentation_endpointslices_mirror.go +++ b/test/e2e/network_segmentation_endpointslices_mirror.go @@ -28,7 +28,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.Networ f.SkipNamespaceCreation = true Context("a user defined primary network", func() { const ( - userDefinedNetworkIPv4Subnet = "10.128.0.0/16" + userDefinedNetworkIPv4Subnet = "172.31.0.0/16" // last subnet in private range 172.16.0.0/12 (rfc1918) userDefinedNetworkIPv6Subnet = "2014:100:200::0/60" nadName = "gryffindor" ) diff --git a/test/e2e/network_segmentation_localnet.go b/test/e2e/network_segmentation_localnet.go index 1dea5e450e..3129f65687 100644 --- a/test/e2e/network_segmentation_localnet.go +++ b/test/e2e/network_segmentation_localnet.go @@ -20,9 +20,11 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" ) -var _ = Describe("Network Segmentation: Localnet", func() { +var _ = Describe("Network Segmentation: Localnet", feature.NetworkSegmentation, func() { var ( f = wrappedTestFramework("network-segmentation-localnet") providerCtx infraapi.Context diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index 0c78d92751..2da9ef862e 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -17,6 +17,7 @@ import ( "k8s.io/apimachinery/pkg/util/rand" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.NetworkSegmentation, func() { @@ -26,14 +27,14 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ ginkgo.Context("on a user defined primary network", func() { const ( 
nadName = "tenant-red" - userDefinedNetworkIPv4Subnet = "10.128.0.0/16" + userDefinedNetworkIPv4Subnet = "172.31.0.0/16" // last subnet in private range 172.16.0.0/12 (rfc1918) userDefinedNetworkIPv6Subnet = "2014:100:200::0/60" - customL2IPv4Gateway = "10.128.0.3" - customL2IPv6Gateway = "2014:100:200::3" - customL2IPv4ReservedCIDR = "10.128.1.0/24" - customL2IPv6ReservedCIDR = "2014:100:200::100/120" - customL2IPv4InfraCIDR = "10.128.0.0/30" - customL2IPv6InfraCIDR = "2014:100:200::/122" + customL2IPv4Gateway = "172.31.0.3" + customL2IPv6Gateway = "2014:100:200::3" + customL2IPv4ReservedCIDR = "172.31.1.0/24" + customL2IPv6ReservedCIDR = "2014:100:200::100/120" + customL2IPv4InfraCIDR = "172.31.0.0/30" + customL2IPv6InfraCIDR = "2014:100:200::/122" nodeHostnameKey = "kubernetes.io/hostname" workerOneNodeName = "ovn-worker" workerTwoNodeName = "ovn-worker2" @@ -102,6 +103,14 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ ginkgo.By("creating client/server pods") serverPodConfig.namespace = f.Namespace.Name clientPodConfig.namespace = f.Namespace.Name + nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), cs, 2) + framework.ExpectNoError(err, "") + if len(nodes.Items) < 2 { + ginkgo.Skip("requires at least 2 Nodes") + } + serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} + runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil) runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil) @@ -149,14 +158,12 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ }, *podConfig( "client-pod", - withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}), ), *podConfig( "server-pod", withCommand(func() []string { return httpServerContainerCmd(port) }), - withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}), ), ), ginkgo.Entry( @@ 
-172,14 +179,12 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ }, *podConfig( "client-pod", - withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}), ), *podConfig( "server-pod", withCommand(func() []string { return httpServerContainerCmd(port) }), - withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}), ), ), ginkgo.Entry( @@ -192,14 +197,12 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.Networ }, *podConfig( "client-pod", - withNodeSelector(map[string]string{nodeHostnameKey: workerOneNodeName}), ), *podConfig( "server-pod", withCommand(func() []string { return httpServerContainerCmd(port) }), - withNodeSelector(map[string]string{nodeHostnameKey: workerTwoNodeName}), ), ), ) diff --git a/test/e2e/network_segmentation_preconfigured_layer2.go b/test/e2e/network_segmentation_preconfigured_layer2.go index aa9f6c819d..5f7a32c85c 100644 --- a/test/e2e/network_segmentation_preconfigured_layer2.go +++ b/test/e2e/network_segmentation_preconfigured_layer2.go @@ -2,15 +2,21 @@ package e2e import ( "context" + "encoding/json" "fmt" "net" + "strings" "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" + nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + udnclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" ) @@ -102,32 +108,178 @@ var _ = Describe("Network Segmentation: Preconfigured Layer2 UDN", feature.Netwo netConfig: &networkAttachmentConfigParams{ name: "custom-l2-net", topology: "layer2", - cidr: joinStrings("10.128.0.0/16", "2014:100:200::0/60"), + cidr: joinStrings("172.31.0.0/16", "2014:100:200::0/60"), role: "primary", }, - expectedGatewayIPs: []string{"10.128.0.1", "2014:100:200::1"}, + expectedGatewayIPs: []string{"172.31.0.1", "2014:100:200::1"}, }), Entry("Layer2 with custom subnets", testConfig{ netConfig: &networkAttachmentConfigParams{ name: "custom-l2-net", topology: "layer2", - cidr: joinStrings("10.128.0.0/16", "2014:100:200::0/60"), + cidr: joinStrings("172.31.0.0/16", "2014:100:200::0/60"), role: "primary", - defaultGatewayIPs: joinStrings("10.128.0.10", "2014:100:200::100"), - reservedCIDRs: joinStrings("10.128.1.0/24", "2014:100:200::/122"), - infrastructureCIDRs: joinStrings("10.128.0.10/30", "2014:100:200::100/122"), + defaultGatewayIPs: joinStrings("172.31.0.10", "2014:100:200::100"), + reservedCIDRs: joinStrings("172.31.1.0/24", "2014:100:200::/122"), + infrastructureCIDRs: joinStrings("172.31.0.10/30", "2014:100:200::100/122"), }, - expectedGatewayIPs: []string{"10.128.0.10", "2014:100:200::100"}, + expectedGatewayIPs: []string{"172.31.0.10", "2014:100:200::100"}, }), Entry("Layer2 with inverted gateway/management IPs", testConfig{ netConfig: &networkAttachmentConfigParams{ name: "inv-gateway-net", topology: "layer2", - cidr: joinStrings("10.128.0.0/16", "2014:100:200::0/60"), + cidr: joinStrings("172.31.0.0/16", 
"2014:100:200::0/60"), role: "primary", - defaultGatewayIPs: joinStrings("10.128.0.2", "2014:100:200::2"), + defaultGatewayIPs: joinStrings("172.31.0.2", "2014:100:200::2"), }, - expectedGatewayIPs: []string{"10.128.0.2", "2014:100:200::2"}, + expectedGatewayIPs: []string{"172.31.0.2", "2014:100:200::2"}, }), ) + + Context("duplicate IP validation with primary UDN layer 2 pods", func() { + const ( + duplicateIPv4 = "10.128.0.200/16" + duplicateIPv6 = "2014:100:200::200/60" + ) + + type duplicateIPTestConfig struct { + podIP string + } + + createPodWithStaticIP := func(podName string, staticIPs []string) *v1.Pod { + ips, err := json.Marshal(staticIPs) + Expect(err).NotTo(HaveOccurred(), "Should marshal IPs for annotation") + + podConfig := *podConfig(podName, + withCommand(func() []string { + return []string{"pause"} + }), + withAnnotations(map[string]string{ + "v1.multus-cni.io/default-network": fmt.Sprintf(`[{"name":"default", "namespace":"ovn-kubernetes", "ips": %s}]`, string(ips)), + }), + ) + podConfig.namespace = f.Namespace.Name + + return runUDNPod(cs, f.Namespace.Name, podConfig, nil) + } + + createPodWithStaticIPNoWait := func(podName string, staticIPs []string) *v1.Pod { + ips, err := json.Marshal(staticIPs) + Expect(err).NotTo(HaveOccurred(), "Should marshal IPs for annotation") + + podConfig := *podConfig(podName, + withCommand(func() []string { + return []string{"pause"} + }), + withAnnotations(map[string]string{ + "v1.multus-cni.io/default-network": fmt.Sprintf(`[{"name":"default", "namespace":"ovn-kubernetes", "ips": %s}]`, string(ips)), + }), + ) + podConfig.namespace = f.Namespace.Name + + // Create the pod but don't wait for it to be Running (since it will fail due to duplicate IP) + podSpec := generatePodSpec(podConfig) + createdPod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(context.Background(), podSpec, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + return createdPod + } + + waitForPodDuplicateIPFailure := func(podName 
string) { + Eventually(func() []v1.Event { + events, err := cs.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.name=%s", podName), + }) + if err != nil { + return nil + } + return events.Items + }). + WithTimeout(60*time.Second). + WithPolling(2*time.Second). + Should(ContainElement(SatisfyAll( + HaveField("Type", Equal("Warning")), + HaveField("Reason", Equal("ErrorAllocatingPod")), + HaveField("Message", ContainSubstring("provided IP is already allocated")), + )), fmt.Sprintf("Pod %s should fail with IP allocation error", podName)) + } + + BeforeEach(func() { + if !isPreConfiguredUdnAddressesEnabled() { + Skip("ENABLE_PRE_CONF_UDN_ADDR not configured") + } + + namespace, err := f.CreateNamespace(context.TODO(), f.BaseName, map[string]string{ + "e2e-framework": f.BaseName, + RequiredUDNNamespaceLabel: "", + }) + f.Namespace = namespace + Expect(err).NotTo(HaveOccurred()) + }) + + DescribeTable("should fail when creating second pod with duplicate static IP", + func(config duplicateIPTestConfig) { + podIPs := filterCIDRs(f.ClientSet, config.podIP) + + if len(podIPs) == 0 { + Skip("IP family not supported in this environment") + } + + By("Creating the L2 network") + netConfig := &networkAttachmentConfigParams{ + name: "duplicate-ip-test-net", + topology: "layer2", + cidr: joinStrings("10.128.0.0/16", "2014:100:200::0/60"), + role: "primary", + namespace: f.Namespace.Name, + } + filterSupportedNetworkConfig(f.ClientSet, netConfig) + udnManifest := generateUserDefinedNetworkManifest(netConfig, f.ClientSet) + cleanup, err := createManifest(netConfig.namespace, udnManifest) + Expect(err).NotTo(HaveOccurred()) + DeferCleanup(cleanup) + Eventually(userDefinedNetworkReadyFunc(f.DynamicClient, netConfig.namespace, netConfig.name), 5*time.Second, time.Second).Should(Succeed()) + + By("Creating first pod with static IP") + pod1 := createPodWithStaticIP("test-pod-1", podIPs) + + By("Verifying first pod gets 
the requested static IP") + pod1, err = cs.CoreV1().Pods(f.Namespace.Name).Get(context.Background(), pod1.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + netStatus, err := podNetworkStatus(pod1, func(status nadapi.NetworkStatus) bool { + return status.Default + }) + Expect(err).NotTo(HaveOccurred(), "Should get network status from pod") + Expect(netStatus).To(HaveLen(1), "Should have one network status for the default network") + + var expectedPodIPs []string + for _, ip := range podIPs { + expectedPodIPs = append(expectedPodIPs, strings.Split(ip, "/")[0]) + } + Expect(netStatus[0].IPs).To(ConsistOf(expectedPodIPs), "Should have the IPs specified in the default network annotation") + + By("Creating second pod with duplicate IP - should fail") + pod2 := createPodWithStaticIPNoWait("test-pod-2", podIPs) + + By("Verifying second pod fails with duplicate IP allocation error") + waitForPodDuplicateIPFailure(pod2.Name) + + By("Verifying first pod is still running normally") + Eventually(func() v1.PodPhase { + updatedPod, err := cs.CoreV1().Pods(f.Namespace.Name).Get(context.Background(), pod1.Name, metav1.GetOptions{}) + if err != nil { + return v1.PodFailed + } + return updatedPod.Status.Phase + }, 30*time.Second, 5*time.Second).Should(Equal(v1.PodRunning)) + }, + Entry("IPv4 duplicate", duplicateIPTestConfig{ + podIP: duplicateIPv4, + }), + Entry("IPv6 duplicate", duplicateIPTestConfig{ + podIP: duplicateIPv6, + }), + ) + }) }) diff --git a/test/e2e/network_segmentation_services.go b/test/e2e/network_segmentation_services.go index 0e0a2a648c..29e9b730e3 100644 --- a/test/e2e/network_segmentation_services.go +++ b/test/e2e/network_segmentation_services.go @@ -41,13 +41,13 @@ var _ = Describe("Network Segmentation: services", feature.NetworkSegmentation, nadName = "tenant-red" servicePort = 88 serviceTargetPort = 80 - userDefinedNetworkIPv4Subnet = "10.128.0.0/16" + userDefinedNetworkIPv4Subnet = "172.31.0.0/16" // last subnet in private range 
172.16.0.0/12 (rfc1918) userDefinedNetworkIPv6Subnet = "2014:100:200::0/60" - customL2IPv4Gateway = "10.128.0.3" + customL2IPv4Gateway = "172.31.0.3" customL2IPv6Gateway = "2014:100:200::3" - customL2IPv4ReservedCIDR = "10.128.1.0/24" + customL2IPv4ReservedCIDR = "172.31.1.0/24" customL2IPv6ReservedCIDR = "2014:100:200::100/120" - customL2IPv4InfraCIDR = "10.128.0.0/30" + customL2IPv4InfraCIDR = "172.31.0.0/30" customL2IPv6InfraCIDR = "2014:100:200::/122" ) diff --git a/test/e2e/networkqos.go b/test/e2e/networkqos.go index 5dd65229c6..a332483f1d 100644 --- a/test/e2e/networkqos.go +++ b/test/e2e/networkqos.go @@ -10,10 +10,11 @@ import ( "strings" "time" - "golang.org/x/sync/errgroup" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "golang.org/x/sync/errgroup" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,7 +23,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("e2e NetworkQoS validation", func() { +var _ = ginkgo.Describe("e2e NetworkQoS validation", feature.NetworkQos, func() { const ( podImage = "ghcr.io/nicolaka/netshoot:v0.13" networkQoSYaml = "networkqos.yaml" diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index 5f9e53a8cc..95dfc5e7c3 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -125,17 +125,19 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is framework.ExpectNoError(err, "must get bgpnet subnets") framework.Logf("the network cidrs to be imported are v4=%s and v6=%s", externalServerV4CIDR, externalServerV6CIDR) for _, node := range nodes.Items { - ipVer := "" - bgpRouteCommand := strings.Split(fmt.Sprintf("ip%s route show %s", ipVer, externalServerV4CIDR), " ") - framework.Logf("Checking for server's route in node %s", node.Name) - gomega.Eventually(func() bool { - routes, err := 
infraprovider.Get().ExecK8NodeCommand(node.GetName(), bgpRouteCommand) - framework.ExpectNoError(err, "failed to get BGP routes from node") - framework.Logf("Routes in node %s", routes) - return strings.Contains(routes, frrContainerIPv4) - }, 30*time.Second).Should(gomega.BeTrue()) - if isDualStackCluster(nodes) { - ipVer = " -6" + if isIPv4Supported(f.ClientSet) { + ipVer := "" + bgpRouteCommand := strings.Split(fmt.Sprintf("ip%s route show %s", ipVer, externalServerV4CIDR), " ") + framework.Logf("Checking for server's route in node %s", node.Name) + gomega.Eventually(func() bool { + routes, err := infraprovider.Get().ExecK8NodeCommand(node.GetName(), bgpRouteCommand) + framework.ExpectNoError(err, "failed to get BGP routes from node") + framework.Logf("Routes in node %s", routes) + return strings.Contains(routes, frrContainerIPv4) + }, 30*time.Second).Should(gomega.BeTrue()) + } + if isIPv6Supported(f.ClientSet) { + ipVer := " -6" nodeIPv6LLA, err := GetNodeIPv6LinkLocalAddressForEth0(routerContainerName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) bgpRouteCommand := strings.Split(fmt.Sprintf("ip%s route show %s", ipVer, externalServerV6CIDR), " ") @@ -352,17 +354,19 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert gomega.Expect(err).NotTo(gomega.HaveOccurred()) } for _, node := range nodes.Items { - ipVer := "" - bgpRouteCommand := strings.Split(fmt.Sprintf("ip%s route show %s", ipVer, externalServerV4CIDR), " ") - framework.Logf("Checking for server's route in node %s", node.Name) - gomega.Eventually(func() bool { - routes, err := infraprovider.Get().ExecK8NodeCommand(node.GetName(), bgpRouteCommand) - framework.ExpectNoError(err, "failed to get BGP routes from node") - framework.Logf("Routes in node %s", routes) - return strings.Contains(routes, frrContainerIPv4) - }, 30*time.Second).Should(gomega.BeTrue()) - if isDualStackCluster(nodes) { - ipVer = " -6" + if isIPv4Supported(f.ClientSet) { + ipVer := "" + bgpRouteCommand 
:= strings.Split(fmt.Sprintf("ip%s route show %s", ipVer, externalServerV4CIDR), " ") + framework.Logf("Checking for server's route in node %s", node.Name) + gomega.Eventually(func() bool { + routes, err := infraprovider.Get().ExecK8NodeCommand(node.GetName(), bgpRouteCommand) + framework.ExpectNoError(err, "failed to get BGP routes from node") + framework.Logf("Routes in node %s", routes) + return strings.Contains(routes, frrContainerIPv4) + }, 30*time.Second).Should(gomega.BeTrue()) + } + if isIPv6Supported(f.ClientSet) { + ipVer := " -6" bgpRouteCommand := strings.Split(fmt.Sprintf("ip%s route show %s", ipVer, externalServerV6CIDR), " ") framework.Logf("Checking for server's route in node %s", node.Name) gomega.Eventually(func() bool { @@ -395,7 +399,14 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { - podIP, err = getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) + if isIPv4Supported(f.ClientSet) && isIPv6Supported(f.ClientSet) { + // for dualstack we need to fetch the IP at index1 + // if singlestack IPV6 the original podIP at index0 is the correct one + // FIXME: This util call assumes the first index will always be the IPv4 address + // and second index will always be the IPv6 address + // which is not always the case. 
+ podIP, err = getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) + } // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") gomega.Expect(outputIP).To(gomega.Equal(podIP), @@ -497,10 +508,6 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", feature.RouteAdvertisements, func(cudnATemplate, cudnBTemplate *udnv1.ClusterUserDefinedNetwork) { const curlConnectionTimeoutCode = "28" - const ( - ipFamilyV4 = iota - ipFamilyV6 - ) f := wrappedTestFramework("bgp-network-isolation") f.SkipNamespaceCreation = true @@ -752,7 +759,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }) ginkgo.DescribeTable("connectivity between networks", - func(connInfo func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool)) { + func(connInfo func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool)) { // checkConnectivity performs a curl command from a specified client (pod or node) // to targetAddress. If clientNamespace is empty the function assumes clientName is a node that will be used as the // client. 
@@ -783,28 +790,16 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.Logf("Connectivity check successful:'%s' -> %s", client, targetAddress) return out, nil } - clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(ipFamilyV4) - - asyncAssertion := gomega.Eventually - timeout := time.Second * 30 - if expectErr { - // When the connectivity check is expected to fail it should be failing consistently - asyncAssertion = gomega.Consistently - timeout = time.Second * 15 - } - asyncAssertion(func() error { - out, err := checkConnectivity(clientName, clientNamespace, dst) - if expectErr != (err != nil) { - return fmt.Errorf("expected connectivity check to return error(%t), got %v, output %v", expectErr, err, out) - } - if expectedOutput != "" { - if !strings.Contains(out, expectedOutput) { - return fmt.Errorf("expected connectivity check to contain %q, got %q", expectedOutput, out) - } + for _, ipFamily := range getSupportedIPFamiliesSlice(f.ClientSet) { + clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(ipFamily) + asyncAssertion := gomega.Eventually + timeout := time.Second * 30 + if expectErr { + // When the connectivity check is expected to fail it should be failing consistently + asyncAssertion = gomega.Consistently + timeout = time.Second * 15 } - if isIPv6Supported(f.ClientSet) && isIPv4Supported(f.ClientSet) { - // use ipFamilyIndex of 1 to pick the IPv6 addresses - clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(ipFamilyV6) + asyncAssertion(func() error { out, err := checkConnectivity(clientName, clientNamespace, dst) if expectErr != (err != nil) { return fmt.Errorf("expected connectivity check to return error(%t), got %v, output %v", expectErr, err, out) @@ -814,12 +809,12 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" return fmt.Errorf("expected connectivity check to contain %q, got %q", expectedOutput, out) } } - } - 
return nil - }, timeout).Should(gomega.BeNil()) + return nil + }, timeout).Should(gomega.BeNil()) + } }, ginkgo.Entry("pod to pod on the same network and same node should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podsNetA[0] and podsNetA[1] are on the same node clientPod := podsNetA[0] srvPod := podsNetA[1] @@ -828,10 +823,11 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.ExpectNoError(err) srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + getFirstCIDROfFamily(ipFamily, clientPodStatus.IPs).IP.String(), false }), ginkgo.Entry("pod to pod on the same network and different nodes should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podsNetA[0] and podsNetA[2] are on different nodes clientPod := podsNetA[0] srvPod := podsNetA[2] @@ -840,10 +836,11 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.ExpectNoError(err) srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - return clientPod.Name, clientPod.Namespace, 
net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + getFirstCIDROfFamily(ipFamily, clientPodStatus.IPs).IP.String(), false }), ginkgo.Entry("pod to pod connectivity on different networks and same node", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podsNetA[2] and podNetB are on the same node clientPod := podsNetA[2] srvPod := podNetB @@ -862,18 +859,18 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.ExpectNoError(err) // With the above underlay routing configuration client pod can reach server pod. - curlOutput = clientPodStatus.IPs[ipFamilyIndex].IP.String() + curlOutput = getFirstCIDROfFamily(ipFamily, clientPodStatus.IPs).IP.String() curlErr = false } else { curlOutput = curlConnectionTimeoutCode curlErr = true } - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", curlOutput, curlErr }), ginkgo.Entry("pod to pod connectivity on different networks and different nodes", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podsNetA[0] and podNetB are on different nodes clientPod := podsNetA[0] srvPod := podNetB @@ -888,44 +885,48 @@ var _ = 
ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - curlOutput = clientPodStatus.IPs[ipFamilyIndex].IP.String() + curlOutput = getFirstCIDROfFamily(ipFamily, clientPodStatus.IPs).IP.String() curlErr = false } else { curlOutput = curlConnectionTimeoutCode curlErr = true } - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlOutput, curlErr + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + curlOutput, curlErr }), ginkgo.Entry("pod in the default network should not be able to access an advertised UDN pod on the same node", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podNetDefault and podNetB are on the same node clientPod := podNetDefault srvPod := podNetB srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + curlConnectionTimeoutCode, true }), ginkgo.Entry("pod in the default network should not be able to access an advertised UDN pod on a different node", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) 
(clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podNetDefault and podsNetA[0] are on different nodes clientPod := podNetDefault srvPod := podsNetA[0] srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + curlConnectionTimeoutCode, true }), ginkgo.Entry("pod in the default network should not be able to access a UDN service", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - return podNetDefault.Name, podNetDefault.Namespace, net.JoinHostPort(svcNetA.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", curlConnectionTimeoutCode, true + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + return podNetDefault.Name, podNetDefault.Namespace, net.JoinHostPort(getFirstIPStringOfFamily(ipFamily, svcNetA.Spec.ClusterIPs), "8080") + "/clientip", + curlConnectionTimeoutCode, true }), ginkgo.Entry("pod in the UDN should be able to access a service in the same network", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetA.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", "", false + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(getFirstIPStringOfFamily(ipFamily, svcNetA.Spec.ClusterIPs), 
"8080") + "/clientip", "", false }), ginkgo.Entry("pod in the UDN should not be able to access a default network service", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { err := true out := curlConnectionTimeoutCode if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { @@ -935,41 +936,66 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" // this causes curl timeout with code 7 host unreachable instead of code 28 out = "" } - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetDefault.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", out, err + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(getFirstIPStringOfFamily(ipFamily, svcNetDefault.Spec.ClusterIPs), "8080") + "/clientip", out, err }), ginkgo.Entry("pod in the UDN should be able to access kapi in default network service", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { return podsNetA[0].Name, podsNetA[0].Namespace, "https://kubernetes.default/healthz", "", false }), + ginkgo.Entry("pod in the UDN should be able to access kapi service cluster IP directly", + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + // Get kubernetes service from default namespace + kubernetesService, err := f.ClientSet.CoreV1().Services("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{}) + framework.ExpectNoError(err, "should be able to get kubernetes service") + + // NOTE: See 
https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/2438-dual-stack-apiserver + // Today the kubernetes.default service is single-stack and cannot be dual-stack. + if isDualStackCluster(nodes) && ipFamily == utilnet.IPv6 { + e2eskipper.Skipf("Dual stack kubernetes.default service is not supported in kubernetes") + } + // Get the cluster IP for the specified IP family + clusterIP := getFirstIPStringOfFamily(ipFamily, kubernetesService.Spec.ClusterIPs) + gomega.Expect(clusterIP).NotTo(gomega.BeEmpty(), fmt.Sprintf("no cluster IP available for IP family %v", ipFamily)) + + // Access the kubernetes API at the cluster IP directly on port 443 + return podsNetA[0].Name, podsNetA[0].Namespace, fmt.Sprintf("https://%s/healthz", net.JoinHostPort(clusterIP, "443")), "", false + }), ginkgo.Entry("pod in the UDN should not be able to access a service in a different UDN", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetB.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", curlConnectionTimeoutCode, true + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(getFirstIPStringOfFamily(ipFamily, svcNetB.Spec.ClusterIPs), "8080") + "/clientip", + curlConnectionTimeoutCode, true }), ginkgo.Entry("host to a local UDN pod should not work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientNode := podsNetA[0].Spec.NodeName srvPod := podsNetA[0] srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) 
framework.ExpectNoError(err) - return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true + return clientNode, "", net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + curlConnectionTimeoutCode, true }), ginkgo.Entry("host to a different node UDN pod should not work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { // podsNetA[0] and podsNetA[2] are on different nodes clientNode := podsNetA[2].Spec.NodeName srvPod := podsNetA[0] srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true + return clientNode, "", net.JoinHostPort(getFirstCIDROfFamily(ipFamily, srvPodStatus.IPs).IP.String(), "8080") + "/clientip", + curlConnectionTimeoutCode, true }), ginkgo.Entry("UDN pod to local node should not work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), clientPod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } // FIXME: add the host process socket to the VRF for this test to work. // This scenario is something that is not supported yet. 
So the test will continue to fail. // This works the same on both normal UDNs and advertised UDNs. @@ -991,52 +1017,72 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/hostname", "", true }), ginkgo.Entry("UDN pod to a different node should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // podsNetA[0] and podsNetA[2] are on different nodes so we can pick the node of podsNetA[2] as the different node destination node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), podsNetA[2].Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } clientNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), clientPod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) - clientNodeIP := clientNode.Status.Addresses[ipFamilyIndex].Address + clientNodeIPv4, clientNodeIPv6 := getNodeAddresses(clientNode) + clientNodeIP := clientNodeIPv4 + if ipFamily == utilnet.IPv6 { + clientNodeIP = clientNodeIPv6 + } // pod -> node traffic should use the node's IP as the source for advertised UDNs. 
return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/clientip", clientNodeIP, false }), ginkgo.Entry("UDN pod to the same node nodeport service in default network should not work", // FIXME: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5410 - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // podsNetA[0] is on nodes[0]. We need the same node. Let's hit the nodeport on nodes[0]. node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } nodePort := svcNetDefault.Spec.Ports[0].NodePort return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", curlConnectionTimeoutCode, true }), ginkgo.Entry("UDN pod to a different node nodeport service in default network should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // podsNetA[0] is on nodes[0]. We need a different node. podNetDefault is on nodes[1]. // The service is backed by podNetDefault. Let's hit the nodeport on nodes[2]. 
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } nodePort := svcNetDefault.Spec.Ports[0].NodePort return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false }), ginkgo.Entry("UDN pod to the same node nodeport service in same UDN network should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // The service is backed by pods in podsNetA. // We want to hit the nodeport on the same node. // client is on nodes[0]. Let's hit nodeport on nodes[0]. node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } nodePort := svcNetA.Spec.Ports[0].NodePort // The service can be backed by any of the pods in podsNetA, so we can't reliably check the output hostname. 
@@ -1044,14 +1090,18 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false }), ginkgo.Entry("UDN pod to a different node nodeport service in same UDN network should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // The service is backed by pods in podsNetA. // We want to hit the nodeport on a different node. // client is on nodes[0]. Let's hit nodeport on nodes[2]. node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } nodePort := svcNetA.Spec.Ports[0].NodePort // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint @@ -1070,15 +1120,19 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" // fails as it doesn't know how to reach this masqueradeIP. // There is also inconsistency in behaviour within Layer2 networks for how IPv4 works and how IPv6 works where the traffic // works on ipv6 because of the flows described below. 
- func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } nodePort := svcNetB.Spec.Ports[0].NodePort out := curlConnectionTimeoutCode errBool := true - if ipFamilyIndex == ipFamilyV6 && cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { + if ipFamily == utilnet.IPv6 && cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { // For Layer2 networks, we have these flows we add on breth0: // cookie=0xdeff105, duration=173.245s, table=1, n_packets=0, n_bytes=0, idle_age=173, priority=14,icmp6,icmp_type=134 actions=FLOOD // cookie=0xdeff105, duration=173.245s, table=1, n_packets=8, n_bytes=640, idle_age=4, priority=14,icmp6,icmp_type=136 actions=FLOOD @@ -1095,14 +1149,18 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", out, errBool }), ginkgo.Entry("UDN pod to a different node nodeport service in different UDN network should work", - func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + func(ipFamily utilnet.IPFamily) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { clientPod := podsNetA[0] // The service is backed by podNetB. // We want to hit the nodeport on a different node from the client. // client is on nodes[0]. Let's hit nodeport on nodes[2]. 
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) framework.ExpectNoError(err) - nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodeIPv4, nodeIPv6 := getNodeAddresses(node) + nodeIP := nodeIPv4 + if ipFamily == utilnet.IPv6 { + nodeIP = nodeIPv6 + } nodePort := svcNetB.Spec.Ports[0].NodePort // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint @@ -2366,8 +2424,8 @@ func checkL3NodePodRoute(node corev1.Node, serverContainerIP, routerContainerNam if isIPv6 { podCIDR = podv6CIDR } - gomega.Expect(podCIDR).NotTo(gomega.BeEmpty(), - "pod CIDR for family (isIPv6=%t) missing for node %s on network %s", isIPv6, node.Name, netName) + gomega.Expect(podCIDR).NotTo(gomega.BeEmpty(), + "pod CIDR for family (isIPv6=%t) missing for node %s on network %s", isIPv6, node.Name, netName) checkRouteInFRR(node, podCIDR, routerContainerName, isIPv6) } diff --git a/test/e2e/service.go b/test/e2e/service.go index 82bcd1d33a..533f11f151 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -515,7 +515,7 @@ var _ = ginkgo.Describe("Services", feature.Service, func() { // TODO: Revisit this once https://bugzilla.redhat.com/show_bug.cgi?id=2169839 is fixed. 
ovnKubernetesNamespace := deploymentconfig.Get().OVNKubernetesNamespace() ovnKubeNodePods, err := f.ClientSet.CoreV1().Pods(ovnKubernetesNamespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "name=ovnkube-node", + LabelSelector: "app=ovnkube-node", }) if err != nil { framework.Failf("could not get ovnkube-node pods: %v", err) @@ -1693,10 +1693,10 @@ metadata: svcLoadBalancerIP, err := getServiceLoadBalancerIP(f.ClientSet, namespaceName, svcName) framework.ExpectNoError(err, fmt.Sprintf("failed to get service lb ip: %s for %s, err: %v", svcLoadBalancerIP, svcName, err)) - discoveryClient := f.ClientSet.DiscoveryV1() + discoveryClient := f.ClientSet.DiscoveryV1() endpointSlice, err := discoveryClient.EndpointSlices(namespaceName).List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", svcName), - }) + LabelSelector: fmt.Sprintf("kubernetes.io/service-name=%s", svcName), + }) framework.ExpectNoError(err, fmt.Sprintf("failed to get endpoints slice for service %s", svcName)) gomega.Expect(endpointSlice).NotTo(gomega.BeNil()) gomega.Expect(len(endpointSlice.Items)).To(gomega.Equal(1)) @@ -1706,7 +1706,7 @@ metadata: nodeName := *endpointSlice.Items[0].Endpoints[0].NodeName nodeIP, err := getNodeIP(f.ClientSet, nodeName) framework.ExpectNoError(err, fmt.Sprintf("failed to get endpoint's %s node ip address", nodeIP)) - + svcIPforCurl := svcLoadBalancerIP if !utilnet.IsIPv6String(svcLoadBalancerIP) { ginkgo.By("Setting up external IPv4 client with an intermediate node") @@ -1728,7 +1728,7 @@ metadata: // Use Eventually because IPv6 takes a while to finish its network configuration // with network namespaces. 
// TODO: Figure out why keeping this at 5seconds is causing CI flakes after K8s 1.33 rebase - // See: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5455 + // See: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5455 gomega.Eventually(func() error { return buildAndRunCommand(fmt.Sprintf("sudo ip netns exec client curl %s:%d/big.iso -o big.iso", svcIPforCurl, endpointHTTPPort)) }, 10*time.Second).Should(gomega.BeNil(), "failed to connect with external load balancer service") @@ -1743,7 +1743,6 @@ metadata: gomega.Eventually(func() error { return buildAndRunCommand(fmt.Sprintf("sudo ip netns exec client curl %s:%d/big.iso -o big.iso", svcIPforCurl, endpointHTTPPort)) }, 5*time.Second).Should(gomega.BeNil(), "failed to connect with external load balancer service after changing mtu size") - }) ginkgo.It("Should ensure load balancer service works with pmtud", func() { diff --git a/test/e2e/util.go b/test/e2e/util.go index f9672f71e9..569674597a 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -6,6 +6,7 @@ import ( "fmt" "math/rand" "net" + "net/netip" "os" "path/filepath" "regexp" @@ -14,6 +15,7 @@ import ( "text/template" "time" + nadapi "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -823,6 +825,71 @@ func assertACLLogs(targetNodeName string, policyNameRegex string, expectedACLVer return false, nil } +// getExternalContainerInterfaceIPsOnNetwork returns the IPv4 and IPv6 addresses (if any) +// of the given external container on the specified provider network. 
+func getExternalContainerInterfaceIPsOnNetwork(containerName, networkName string) (string, string, error) { + netw, err := infraprovider.Get().GetNetwork(networkName) + if err != nil { + return "", "", fmt.Errorf("failed to get provider network %q: %w", networkName, err) + } + ni, err := infraprovider.Get().GetExternalContainerNetworkInterface( + infraapi.ExternalContainer{Name: containerName}, + netw, + ) + if err != nil { + return "", "", fmt.Errorf("failed to get network interface for container %q on network %q: %w", containerName, netw.Name(), err) + } + return ni.IPv4, ni.IPv6, nil +} + +// getExternalContainerInterfaceIPs returns IPv4 and IPv6 addresses configured +// on the given interface inside the given external container. This is useful +// for manually-configured interfaces like VLAN interfaces. +func getExternalContainerInterfaceIPs(containerName, ifaceName string) ([]string, []string, error) { + container := infraapi.ExternalContainer{Name: containerName} + + // Replicates the relevant fields from the json output by "ip -j addr show" + type addrInfo struct { + Family string `json:"family"` + Local string `json:"local"` + Scope string `json:"scope"` + } + type ipAddrJSON struct { + AddrInfo []addrInfo `json:"addr_info"` + } + + out, err := infraprovider.Get().ExecExternalContainerCommand( + container, []string{"ip", "-j", "addr", "show", "dev", ifaceName}) + if err != nil { + return nil, nil, fmt.Errorf("failed to exec on container %q: %w", containerName, err) + } + var parsed []ipAddrJSON + if err := json.Unmarshal([]byte(out), &parsed); err != nil { + return nil, nil, fmt.Errorf("failed to parse ip -j output: %w", err) + } + + var v4, v6 []string + for _, entry := range parsed { + for _, ai := range entry.AddrInfo { + if ai.Local == "" { + continue + } + // Skip link-local/host-scoped addresses + if ai.Scope == "link" || ai.Scope == "host" { + continue + } + switch ai.Family { + case "inet": + v4 = append(v4, ai.Local) + case "inet6": + v6 = 
append(v6, ai.Local) + } + } + } + + return v4, v6, nil +} + // patchServiceStringValue patches service serviceName in namespace serviceNamespace with provided string value. func patchServiceStringValue(c kubernetes.Interface, serviceName, serviceNamespace, jsonPath, value string) error { patch := []struct { @@ -1534,3 +1601,151 @@ func isDNSNameResolverEnabled() bool { val, present := os.LookupEnv("OVN_ENABLE_DNSNAMERESOLVER") return present && val == "true" } + +// Given a node name, returns the host subnets (IPv4/IPv6) of the node primary interface +// as annotated by OVN-Kubernetes. The returned slice may contain zero, one, or two CIDRs. +func getHostSubnetsForNode(cs clientset.Interface, nodeName string) ([]string, error) { + node, err := cs.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get node %s: %v", nodeName, err) + } + nodeIfAddr, err := util.GetNodeIfAddrAnnotation(node) + if err != nil { + return nil, err + } + hostSubnets := []string{} + if nodeIfAddr.IPv4 != "" { + ip, ipNet, err := net.ParseCIDR(nodeIfAddr.IPv4) + if err != nil { + return nil, fmt.Errorf("failed to parse IPv4 address %s: %v", nodeIfAddr.IPv4, err) + } + ipNet.IP = ip.Mask(ipNet.Mask) + hostSubnets = append(hostSubnets, ipNet.String()) + } + if nodeIfAddr.IPv6 != "" { + ip, ipNet, err := net.ParseCIDR(nodeIfAddr.IPv6) + if err != nil { + return nil, fmt.Errorf("failed to parse IPv6 address %s: %v", nodeIfAddr.IPv6, err) + } + ipNet.IP = ip.Mask(ipNet.Mask) + hostSubnets = append(hostSubnets, ipNet.String()) + } + return hostSubnets, nil +} + +// normalizeIP removes CIDR notation from an IP address if present and validates/normalizes the IP format. +// For example, "10.0.0.2/24" becomes "10.0.0.2". 
+func normalizeIP(s string) (string, error) { + if s == "" { + return s, nil + } + if p, err := netip.ParsePrefix(s); err == nil { + return p.Addr().String(), nil + } + if a, err := netip.ParseAddr(s); err == nil { + return a.String(), nil + } + return "", fmt.Errorf("invalid IP address: %s", s) +} + +func normalizeIPAddresses(ips []string) ([]string, error) { + normalized := make([]string, len(ips)) + for i, ip := range ips { + normalizedIP, err := normalizeIP(ip) + if err != nil { + return nil, fmt.Errorf("failed to normalize IP addresses: %w", err) + } + normalized[i] = normalizedIP + } + return normalized, nil +} + +// getNetworkInterfaceName extracts the interface name from a pod's network-status annotation +// If the pod is host-networked, it returns eth0. +// If the pod has attachments, it finds the interface for the specified network +// If the pod has no attachments, it returns the default network interface +func getNetworkInterfaceName(pod *v1.Pod, podConfig podConfiguration, netConfigName string) (string, error) { + var predicate func(nadapi.NetworkStatus) bool + if podConfig.hostNetwork { + return "eth0", nil + } + if len(podConfig.attachments) > 0 { + // Pod has attachments - find the specific network interface + expectedNetworkName := fmt.Sprintf("%s/%s", podConfig.namespace, netConfigName) + predicate = func(status nadapi.NetworkStatus) bool { + return status.Name == expectedNetworkName + } + } else { + // Pod has no attachments - find the default network interface + predicate = func(status nadapi.NetworkStatus) bool { + return status.Name == "ovn-kubernetes" || status.Default + } + } + networkStatuses, err := podNetworkStatus(pod, predicate) + if err != nil { + return "", fmt.Errorf("failed to get network status from pod %s/%s: %w", pod.Namespace, pod.Name, err) + } + if len(networkStatuses) == 0 { + if len(podConfig.attachments) > 0 { + return "", fmt.Errorf("no network interface found for network %s/%s", podConfig.namespace, netConfigName) + } + 
return "", fmt.Errorf("no default network interface found") + } + if len(networkStatuses) > 1 { + return "", fmt.Errorf("multiple network interfaces found matching criteria") + } + iface := networkStatuses[0].Interface + // Multus may omit Interface for the default network; default to eth0. + if iface == "" && len(podConfig.attachments) == 0 { + return "eth0", nil + } + return iface, nil +} + +// findOVNDBLeaderPod finds the ovnkube-db pod that is currently the northbound database leader +func findOVNDBLeaderPod(f *framework.Framework, cs clientset.Interface, namespace string) (*v1.Pod, error) { + dbPods, err := cs.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "ovn-db-pod=true"}) + if err != nil { + return nil, fmt.Errorf("failed to list ovnkube-db pods: %v", err) + } + + if len(dbPods.Items) == 0 { + return nil, fmt.Errorf("no ovnkube-db pods found") + } + + if len(dbPods.Items) == 1 { + return &dbPods.Items[0], nil + } + + for i := range dbPods.Items { + pod := &dbPods.Items[i] + if pod.Status.Phase != v1.PodRunning { + continue + } + + stdout, stderr, err := ExecCommandInContainerWithFullOutput(f, namespace, pod.Name, "nb-ovsdb", + "ovsdb-client", "query", "unix:/var/run/openvswitch/ovnnb_db.sock", + `["_Server", {"op":"select", "table":"Database", "where":[["name", "==", "OVN_Northbound"]], "columns": ["leader"]}]`) + + if err != nil { + framework.Logf("Warning: Failed to query leader status on pod %s: %v, stderr: %s", pod.Name, err, stderr) + continue + } + + // Parse the JSON response to check if this pod is the leader + // Expected: [{"rows":[{"leader":true}]}] + type dbResp struct { + Rows []struct { + Leader bool `json:"leader"` + } `json:"rows"` + } + var resp []dbResp + if err := json.Unmarshal([]byte(stdout), &resp); err == nil && + len(resp) > 0 && len(resp[0].Rows) > 0 && resp[0].Rows[0].Leader { + framework.Logf("Found nbdb leader pod: %s", pod.Name) + return pod, nil + } + } + + return nil, fmt.Errorf("no nbdb 
leader pod found among %d ovnkube-db pods", len(dbPods.Items)) +} diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index 096debe8a6..1ab06622f6 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -160,7 +160,14 @@ else # TODO: perhaps the secondary network attached pods should not be attached to default network skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on the same node" skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on a different node" - + if [ "$PLATFORM_IPV6_SUPPORT" == true ] && [ "$PLATFORM_IPV4_SUPPORT" == false ]; then + # Skip all Multi Homing tests in BGP IPv6 only mode + # TODO: The tests are doing weird static ipv4, ipv6, dualstack specific ginkgo entries instead of relying on + # cluster family type to make a dynamic determination. These tests need to be refactored to be family-friendly + # instead of assuming only single stack v4 or dualstack lanes exist. + # https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5569 + skip "Multi Homing" + fi # these tests require metallb but the configuration we do for it is not compatible with the configuration we do to advertise the default network # TODO: consolidate configuration skip "Load Balancer Service Tests with MetalLB"