diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dea0289e73..d9d5d40eec 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -4,6 +4,10 @@ on: merge_group: pull_request: branches: [ master ] + # Only run jobs if at least one non-doc file is changed + paths-ignore: + - '**/*.md' + - 'mkdocs.yml' schedule: - cron: '0 */12 * * *' workflow_dispatch: @@ -38,7 +42,7 @@ jobs: # separate job for parallelism lint: name: Lint - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check out code uses: actions/checkout@v4 @@ -63,7 +67,7 @@ jobs: build-master: name: Build-master - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: # Create a cache for the built master image - name: Restore master image cache @@ -156,7 +160,7 @@ jobs: build-pr: name: Build-PR - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: # Create a cache for the build PR image - name: Restore PR image cache @@ -200,6 +204,7 @@ jobs: if: steps.is_pr_image_build_needed.outputs.PR_IMAGE_RESTORED != 'true' && success() run: | set -x + sudo apt update sudo apt-get install linux-modules-extra-$(uname -r) -y sudo modprobe vrf @@ -270,7 +275,7 @@ jobs: ovn-upgrade-e2e: name: Upgrade OVN from Master to PR branch based image if: github.event_name != 'schedule' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 120 needs: - build-master @@ -314,14 +319,25 @@ jobs: - name: Free up disk space run: | + df -h sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + azure-cli firefox \ google-chrome-stable \ - llvm-* microsoft-edge-stable mono-* \ - msbuild mysql-server-core-* php-* php7* \ + llvm-* microsoft-edge-stable \ powershell temurin-* zulu-* + # clean unused packages + sudo apt-get autoclean + sudo apt-get autoremove -y + # clean apt cache + sudo apt-get clean + sudo docker system prune -af --volumes + df -h + sudo swapon --show + sudo 
swapoff -a + sudo rm -f /mnt/swapfile + df -h - name: Download test-image-master uses: actions/download-artifact@v4 @@ -374,6 +390,7 @@ jobs: uses: actions/checkout@v4 - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: ovn upgrade @@ -382,6 +399,7 @@ jobs: make -C test upgrade-ovn - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run E2E shard-conformance @@ -389,6 +407,7 @@ jobs: make -C test shard-conformance - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Export kind logs @@ -406,7 +425,7 @@ jobs: e2e: name: e2e - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 # 30 mins for kind, 180 mins for control-plane tests, 10 minutes for all other steps timeout-minutes: 220 strategy: @@ -463,15 +482,15 @@ jobs: - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "network-segmentation", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv6", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones"} - {"target": "bgp", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation"} - - {"target": "bgp", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "snatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", "network-segmentation": "enable-network-segmentation"} + - {"target": "bgp", "ha": "noHA", "gateway-mode": "shared", "ipfamily": "dualstack", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "routeadvertisements": "advertise-default", 
"network-segmentation": "enable-network-segmentation"} - {"target": "traffic-flow-test-only","ha": "noHA", "gateway-mode": "shared", "ipfamily": "ipv4", "disable-snat-multiple-gws": "noSnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "traffic-flow-tests": "1-24", "network-segmentation": "enable-network-segmentation"} - {"target": "tools", "ha": "noHA", "gateway-mode": "local", "ipfamily": "dualstack", "disable-snat-multiple-gws": "SnatGW", "second-bridge": "1br", "ic": "ic-single-node-zones", "network-segmentation": "enable-network-segmentation"} needs: [ build-pr ] env: JOB_NAME: "${{ matrix.target }}-${{ matrix.ha }}-${{ matrix.gateway-mode }}-${{ matrix.ipfamily }}-${{ matrix.disable-snat-multiple-gws }}-${{ matrix.second-bridge }}-${{ matrix.ic }}" OVN_HYBRID_OVERLAY_ENABLE: ${{ (matrix.target == 'control-plane' || matrix.target == 'control-plane-helm') && (matrix.ipfamily == 'ipv4' || matrix.ipfamily == 'dualstack' ) }} - OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' }}" - OVN_EMPTY_LB_EVENTS: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' }}" + OVN_MULTICAST_ENABLE: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' || matrix.target == 'bgp' }}" + OVN_EMPTY_LB_EVENTS: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'bgp' }}" OVN_HA: "${{ matrix.ha == 'HA' }}" OVN_DISABLE_SNAT_MULTIPLE_GWS: "${{ matrix.disable-snat-multiple-gws == 'noSnatGW' }}" KIND_INSTALL_METALLB: "${{ matrix.target == 'control-plane' || matrix.target == 'control-plane-helm' || matrix.target == 'network-segmentation' }}" @@ -479,7 +498,6 @@ jobs: OVN_SECOND_BRIDGE: "${{ matrix.second-bridge == '2br' }}" ENABLE_MULTI_NET: "${{ matrix.target == 'multi-homing' || matrix.target == 'kv-live-migration' || matrix.target == 
'network-segmentation' || matrix.target == 'tools' || matrix.target == 'multi-homing-helm' || matrix.target == 'traffic-flow-test-only' || matrix.routeadvertisements != '' }}" ENABLE_NETWORK_SEGMENTATION: "${{ matrix.target == 'network-segmentation' || matrix.network-segmentation == 'enable-network-segmentation' }}" - DISABLE_UDN_HOST_ISOLATION: "true" PLATFORM_IPV4_SUPPORT: "${{ matrix.ipfamily == 'IPv4' || matrix.ipfamily == 'dualstack' }}" PLATFORM_IPV6_SUPPORT: "${{ matrix.ipfamily == 'IPv6' || matrix.ipfamily == 'dualstack' }}" KIND_INSTALL_KUBEVIRT: "${{ matrix.target == 'kv-live-migration' }}" @@ -500,20 +518,31 @@ jobs: - name: Install VRF kernel module run: | set -x + sudo apt update sudo apt-get install linux-modules-extra-$(uname -r) -y sudo modprobe vrf - name: Free up disk space run: | + df -h sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + azure-cli firefox \ google-chrome-stable \ - llvm-* microsoft-edge-stable mono-* \ - msbuild mysql-server-core-* php-* php7* \ + llvm-* microsoft-edge-stable \ powershell temurin-* zulu-* - sudo docker system prune -af + # clean unused packages + sudo apt-get autoclean + sudo apt-get autoremove -y + # clean apt cache + sudo apt-get clean + sudo docker system prune -af --volumes + df -h + sudo swapon --show + sudo swapoff -a + sudo rm -f /mnt/swapfile + df -h - name: Setup /mnt/runner directory run: | @@ -557,7 +586,7 @@ jobs: echo OVN_TEST_EX_GW_NETWORK=xgw >> $GITHUB_ENV echo OVN_ENABLE_EX_GW_NETWORK_BRIDGE=true >> $GITHUB_ENV fi - if [[ "$JOB_NAME" == *"shard-conformance"* ]] && [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then + if [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then echo "ADVERTISE_DEFAULT_NETWORK=true" >> $GITHUB_ENV # Use proper variable declaration with default values @@ -609,12 +638,15 @@ jobs: run: make -C test traffic-flow-tests WHAT="setup" - name: Runner Diagnostics + if: always() 
uses: ./.github/actions/diagnostics - name: Run Tests # e2e tests take ~60 minutes normally, 120 should be more than enough # set 3 hours for control-plane tests as these might take a while - timeout-minutes: ${{ matrix.target == 'control-plane' && 180 || matrix.target == 'control-plane-helm' && 180 || matrix.target == 'external-gateway' && 180 || 120 }} + # give 10m extra to give ginkgo chance to timeout before github so that we + # get its output + timeout-minutes: ${{ matrix.target == 'bgp' && 190 || matrix.target == 'control-plane' && 190 || matrix.target == 'control-plane-helm' && 190 || matrix.target == 'external-gateway' && 190 || 130 }} run: | # used by e2e diagnostics package export OVN_IMAGE="ovn-daemonset-fedora:pr" @@ -639,7 +671,7 @@ jobs: elif [ "${{ matrix.target }}" == "network-segmentation" ]; then make -C test control-plane WHAT="Network Segmentation" elif [ "${{ matrix.target }}" == "bgp" ]; then - make -C test control-plane WHAT="BGP" + make -C test control-plane elif [ "${{ matrix.target }}" == "tools" ]; then make -C go-controller build make -C test tools @@ -660,6 +692,7 @@ jobs: fi - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Export kind logs @@ -681,7 +714,7 @@ jobs: e2e-dual-conversion: name: e2e-dual-conversion if: github.event_name != 'schedule' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 timeout-minutes: 60 strategy: fail-fast: false @@ -726,14 +759,25 @@ jobs: - name: Free up disk space run: | + df -h sudo rm -rf /usr/local/lib/android/sdk sudo apt-get update sudo eatmydata apt-get purge --auto-remove -y \ - azure-cli aspnetcore-* dotnet-* ghc-* firefox \ + azure-cli firefox \ google-chrome-stable \ - llvm-* microsoft-edge-stable mono-* \ - msbuild mysql-server-core-* php-* php7* \ + llvm-* microsoft-edge-stable \ powershell temurin-* zulu-* + # clean unused packages + sudo apt-get autoclean + sudo apt-get autoremove -y + # clean apt cache + sudo apt-get clean + sudo docker system prune -af 
--volumes + df -h + sudo swapon --show + sudo swapoff -a + sudo rm -f /mnt/swapfile + df -h - name: Disable ufw # For IPv6 and Dualstack, ufw (Uncomplicated Firewall) should be disabled. @@ -760,6 +804,7 @@ jobs: ./contrib/kind-dual-stack-conversion.sh - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run Dual-Stack Tests @@ -767,6 +812,7 @@ jobs: make -C test shard-test WHAT="Networking Granular Checks\|DualStack" - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Run Dual-Stack Control-Plane Tests @@ -774,6 +820,7 @@ jobs: make -C test control-plane WHAT="DualStack" - name: Runner Diagnostics + if: always() uses: ./.github/actions/diagnostics - name: Export kind logs diff --git a/MEETINGS.md b/MEETINGS.md index 701459788f..8964025628 100644 --- a/MEETINGS.md +++ b/MEETINGS.md @@ -6,7 +6,7 @@ All are welcome to join our meetings! If you want to discuss something with the ## Meeting time -We meet alternate Monday's at 6:00 PM CET/CEST. +We meet alternate Monday's at 5:00 PM CET/CEST. In order to figure out when our next meeting is, please check our agenda for previous meeting history. The meetings last up to 1 hour. diff --git a/contrib/kind-common b/contrib/kind-common index 66cc078d3e..2a564dece0 100644 --- a/contrib/kind-common +++ b/contrib/kind-common @@ -174,16 +174,16 @@ EOF # Override GOBIN until https://github.com/metallb/metallb/issues/2218 is fixed. 
GOBIN="" inv dev-env -n ovn -b frr -p bgp -i "${ip_family}" - docker network rm -f clientnet - docker network create --subnet="${METALLB_CLIENT_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge clientnet - docker network connect clientnet frr + $OCI_BIN network rm -f clientnet + $OCI_BIN network create --subnet="${METALLB_CLIENT_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge clientnet + $OCI_BIN network connect clientnet frr if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then # Enable IPv6 forwarding in FRR - docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1 + $OCI_BIN exec frr sysctl -w net.ipv6.conf.all.forwarding=1 fi # Note: this image let's us use it also for creating load balancer backends that can send big packets - docker rm -f lbclient - docker run --cap-add NET_ADMIN --user 0 -d --network clientnet --rm --name lbclient quay.io/itssurya/dev-images:metallb-lbservice + $OCI_BIN rm -f lbclient + $OCI_BIN run --cap-add NET_ADMIN --user 0 -d --network clientnet --rm --name lbclient quay.io/itssurya/dev-images:metallb-lbservice popd delete_metallb_dir @@ -197,18 +197,18 @@ EOF kubectl label node "$n" node.kubernetes.io/exclude-from-external-load-balancers- done - kind_network_v4=$(docker inspect -f '{{index .NetworkSettings.Networks "kind" "IPAddress"}}' frr) + kind_network_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.kind.IPAddress}}' frr) echo "FRR kind network IPv4: ${kind_network_v4}" - kind_network_v6=$(docker inspect -f '{{index .NetworkSettings.Networks "kind" "GlobalIPv6Address"}}' frr) + kind_network_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}' frr) echo "FRR kind network IPv6: ${kind_network_v6}" local client_network_v4 client_network_v6 - client_network_v4=$(docker inspect -f '{{index .NetworkSettings.Networks "clientnet" "IPAddress"}}' frr) + client_network_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.clientnet.IPAddress}}' frr) echo "FRR client network IPv4: ${client_network_v4}" - 
client_network_v6=$(docker inspect -f '{{index .NetworkSettings.Networks "clientnet" "GlobalIPv6Address"}}' frr) + client_network_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.clientnet.GlobalIPv6Address}}' frr) echo "FRR client network IPv6: ${client_network_v6}" local client_subnets - client_subnets=$(docker network inspect clientnet -f '{{range .IPAM.Config}}{{.Subnet}}#{{end}}') + client_subnets=$($OCI_BIN network inspect clientnet -f '{{range .IPAM.Config}}{{.Subnet}}#{{end}}') echo "${client_subnets}" local client_subnets_v4 client_subnets_v6 client_subnets_v4=$(echo "${client_subnets}" | cut -d '#' -f 1) @@ -219,10 +219,10 @@ EOF KIND_NODES=$(kind_get_nodes) for n in ${KIND_NODES}; do if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then - docker exec "${n}" ip route add "${client_subnets_v4}" via "${kind_network_v4}" + $OCI_BIN exec "${n}" ip route add "${client_subnets_v4}" via "${kind_network_v4}" fi if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - docker exec "${n}" ip -6 route add "${client_subnets_v6}" via "${kind_network_v6}" + $OCI_BIN exec "${n}" ip -6 route add "${client_subnets_v6}" via "${kind_network_v6}" fi done @@ -230,10 +230,10 @@ EOF # one svcVIP (192.168.10.0/fc00:f853:ccd:e799::) is more than enough since at a time we will only # have one load balancer service if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then - docker exec lbclient ip route add 192.168.10.0 via "${client_network_v4}" dev eth0 + $OCI_BIN exec lbclient ip route add 192.168.10.0 via "${client_network_v4}" dev eth0 fi if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - docker exec lbclient ip -6 route add fc00:f853:ccd:e799:: via "${client_network_v6}" dev eth0 + $OCI_BIN exec lbclient ip -6 route add fc00:f853:ccd:e799:: via "${client_network_v6}" dev eth0 fi sleep 30 } @@ -254,14 +254,14 @@ install_plugins() { } destroy_metallb() { - if docker ps --format '{{.Names}}' | grep -Eq '^lbclient$'; then - docker stop lbclient + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^lbclient$'; 
then + $OCI_BIN stop lbclient fi - if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then - docker stop frr + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^frr$'; then + $OCI_BIN stop frr fi - if docker network ls --format '{{.Name}}' | grep -q '^clientnet$'; then - docker network rm clientnet + if $OCI_BIN network ls --format '{{.Name}}' | grep -q '^clientnet$'; then + $OCI_BIN network rm clientnet fi delete_metallb_dir } @@ -388,30 +388,7 @@ install_kubevirt() { local kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") kubectl -n kubevirt patch kubevirt kubevirt --type=json --patch '[{"op":"add","path":"/spec/configuration/network","value":{}},{"op":"add","path":"/spec/configuration/network/binding","value":{"l2bridge":{"domainAttachmentType":"managedTap","migration":{}}}}]' - - if [ ! -d "./bin" ] - then - mkdir -p ./bin - if_error_exit "Failed to create bin dir!" - fi - - if [[ "$OSTYPE" == "linux-gnu" ]]; then - OS_TYPE="linux" - elif [[ "$OSTYPE" == "darwin"* ]]; then - OS_TYPE="darwin" - fi - - pushd ./bin - if [ ! -f ./virtctl ]; then - kubevirt_stable_release_url=$(get_kubevirt_release_url "stable") - cli_name="virtctl-${kubevirt_stable_release_url##*/}-${OS_TYPE}-${ARCH}" - curl -LO "${kubevirt_stable_release_url}/${cli_name}" - mv ${cli_name} virtctl - if_error_exit "Failed to download virtctl!" 
- fi - popd - chmod +x ./bin/virtctl } install_cert_manager() { @@ -708,7 +685,7 @@ deploy_frr_external_container() { popd || exit 1 if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then # Enable IPv6 forwarding in FRR - docker exec frr sysctl -w net.ipv6.conf.all.forwarding=1 + $OCI_BIN exec frr sysctl -w net.ipv6.conf.all.forwarding=1 fi } @@ -735,33 +712,40 @@ deploy_bgp_external_server() { ip_family="ipv4" ipv6_network="" fi - docker rm -f bgpserver - docker network rm -f bgpnet - docker network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet - docker network connect bgpnet frr - docker run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec + $OCI_BIN rm -f bgpserver + $OCI_BIN network rm -f bgpnet + $OCI_BIN network create --subnet="${BGP_SERVER_NET_SUBNET_IPV4}" ${ipv6_network} --driver bridge bgpnet + $OCI_BIN network connect bgpnet frr + $OCI_BIN run --cap-add NET_ADMIN --user 0 -d --network bgpnet --rm --name bgpserver -p 8080:8080 registry.k8s.io/e2e-test-images/agnhost:2.45 netexec # let's make the bgp external server have its default route towards FRR router so that we don't need to add routes during tests back to the pods in the # cluster for return traffic local bgp_network_frr_v4 bgp_network_frr_v6 - bgp_network_frr_v4=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "IPAddress"}}' frr) + bgp_network_frr_v4=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.bgpnet.IPAddress}}' frr) echo "FRR kind network IPv4: ${bgp_network_frr_v4}" $OCI_BIN exec bgpserver ip route replace default via "$bgp_network_frr_v4" if [ "$PLATFORM_IPV6_SUPPORT" == true ] ; then - bgp_network_frr_v6=$($OCI_BIN inspect -f '{{index .NetworkSettings.Networks "bgpnet" "GlobalIPv6Address"}}' frr) + bgp_network_frr_v6=$($OCI_BIN inspect -f '{{.NetworkSettings.Networks.bgpnet.GlobalIPv6Address}}' frr) echo "FRR kind network IPv6: ${bgp_network_frr_v6}" 
$OCI_BIN exec bgpserver ip -6 route replace default via "$bgp_network_frr_v6" fi + # disable the default route to make sure the container only routes across + # directly connected or learnt networks (doing this at the very end since + # docker changes the routing table when a new network is connected) + $OCI_BIN exec frr ip route delete default + $OCI_BIN exec frr ip route + $OCI_BIN exec frr ip -6 route delete default + $OCI_BIN exec frr ip -6 route } destroy_bgp() { - if docker ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then - docker stop bgpserver + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^bgpserver$'; then + $OCI_BIN stop bgpserver fi - if docker ps --format '{{.Names}}' | grep -Eq '^frr$'; then - docker stop frr + if $OCI_BIN ps --format '{{.Names}}' | grep -Eq '^frr$'; then + $OCI_BIN stop frr fi - if docker network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then - docker network rm bgpnet + if $OCI_BIN network ls --format '{{.Name}}' | grep -q '^bgpnet$'; then + $OCI_BIN network rm bgpnet fi } @@ -800,7 +784,7 @@ install_ffr_k8s() { echo "Attempting to reach frr-k8s webhook" kind export kubeconfig --name ovn while true; do -docker exec ovn-control-plane curl -ksS --connect-timeout 0.1 https://$(kubectl get svc -n frr-k8s-system frr-k8s-webhook-service -o jsonpath='{.spec.clusterIP}') +$OCI_BIN exec ovn-control-plane curl -ksS --connect-timeout 0.1 https://$(kubectl get svc -n frr-k8s-system frr-k8s-webhook-service -o jsonpath='{.spec.clusterIP}') [ \$? -eq 0 ] && exit 0 echo "Couldn't reach frr-k8s webhook, trying in 1s..." sleep 1s @@ -817,7 +801,7 @@ EOF rm -rf "${FRR_TMP_DIR}" # Add routes for pod networks dynamically into the github runner for return traffic to pass back - if [ -n "${JOB_NAME:-}" ] && [[ "$JOB_NAME" == *"shard-conformance"* ]] && [ "$ADVERTISE_DEFAULT_NETWORK" == "true" ]; then + if [ "$ADVERTISE_DEFAULT_NETWORK" = "true" ]; then echo "Adding routes for Kubernetes pod networks..." 
NODES=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') echo "Found nodes: $NODES" @@ -835,7 +819,7 @@ EOF # Add IPv4 route if [ -n "$ipv4_subnet" ] && [ -n "$node_ipv4" ]; then echo "Adding IPv4 route for $node ($node_ipv4): $ipv4_subnet" - sudo ip route add $ipv4_subnet via $node_ipv4 + sudo ip route replace $ipv4_subnet via $node_ipv4 fi fi @@ -847,7 +831,7 @@ EOF if [ -n "$ipv6_subnet" ] && [ -n "$node_ipv6" ]; then echo "Adding IPv6 route for $node ($node_ipv6): $ipv6_subnet" - sudo ip -6 route add $ipv6_subnet via $node_ipv6 + sudo ip -6 route replace $ipv6_subnet via $node_ipv6 fi fi done diff --git a/contrib/kind-helm.sh b/contrib/kind-helm.sh index c682c94ac7..17c30a1bc7 100755 --- a/contrib/kind-helm.sh +++ b/contrib/kind-helm.sh @@ -27,6 +27,7 @@ set_default_params() { export KIND_REMOVE_TAINT=${KIND_REMOVE_TAINT:-true} export ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} export ENABLE_NETWORK_SEGMENTATION=${ENABLE_NETWORK_SEGMENTATION:-false} + export ENABLE_PRE_CONF_UDN_ADDR=${ENABLE_PRE_CONF_UDN_ADDR:-false} export OVN_NETWORK_QOS_ENABLE=${OVN_NETWORK_QOS_ENABLE:-false} export KIND_NUM_WORKER=${KIND_NUM_WORKER:-2} export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-ovn} @@ -99,6 +100,7 @@ usage() { echo " [ -ikv | --install-kubevirt ]" echo " [ -mne | --multi-network-enable ]" echo " [ -nse | --network-segmentation-enable ]" + echo " [ -uae | --preconfigured-udn-addresses-enable ]" echo " [ -nqe | --network-qos-enable ]" echo " [ -wk | --num-workers ]" echo " [ -ic | --enable-interconnect]" @@ -106,28 +108,29 @@ usage() { echo " [ -cn | --cluster-name ]" echo " [ -h ]" echo "" - echo "--delete Delete current cluster" - echo "-cf | --config-file Name of the KIND configuration file" - echo "-kt | --keep-taint Do not remove taint components" - echo " DEFAULT: Remove taint components" - echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled" - echo "-ho | --hybrid-enabled Enable hybrid overlay. 
DEFAULT: Disabled" - echo "-obs | --observability Enable observability. DEFAULT: Disabled" - echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" - echo "-ii | --install-ingress Flag to install Ingress Components." - echo " DEFAULT: Don't install ingress components." - echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" - echo "-pl | --install-cni-plugins Install CNI plugins" - echo "-ikv | --install-kubevirt Install kubevirt" - echo "-mne | --multi-network-enable Enable multi networks. DEFAULT: Disabled" - echo "-nse | --network-segmentation-enable Enable network segmentation. DEFAULT: Disabled" - echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled" - echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled" - echo "-wk | --num-workers Number of worker nodes. DEFAULT: 2 workers" - echo "-cn | --cluster-name Configure the kind cluster's name" - echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." - echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" - echo "-npz | --nodes-per-zone Specify number of nodes per zone (Default 0, which means global zone; >0 means interconnect zone, where 1 for single-node zone, >1 for multi-node zone). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." + echo "--delete Delete current cluster" + echo "-cf | --config-file Name of the KIND configuration file" + echo "-kt | --keep-taint Do not remove taint components" + echo " DEFAULT: Remove taint components" + echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled" + echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled" + echo "-obs | --observability Enable observability. 
DEFAULT: Disabled" + echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" + echo "-ii | --install-ingress Flag to install Ingress Components." + echo " DEFAULT: Don't install ingress components." + echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" + echo "-pl | --install-cni-plugins Install CNI plugins" + echo "-ikv | --install-kubevirt Install kubevirt" + echo "-mne | --multi-network-enable Enable multi networks. DEFAULT: Disabled" + echo "-nse | --network-segmentation-enable Enable network segmentation. DEFAULT: Disabled" + echo "-uae | --preconfigured-udn-addresses-enable Enable connecting workloads with preconfigured network to user-defined networks. DEFAULT: Disabled" + echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled" + echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled" + echo "-wk | --num-workers Number of worker nodes. DEFAULT: 2 workers" + echo "-cn | --cluster-name Configure the kind cluster's name" + echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." + echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" + echo "-npz | --nodes-per-zone Specify number of nodes per zone (Default 0, which means global zone; >0 means interconnect zone, where 1 for single-node zone, >1 for multi-node zone). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." 
echo "" } @@ -168,6 +171,8 @@ parse_args() { ;; -nse | --network-segmentation-enable) ENABLE_NETWORK_SEGMENTATION=true ;; + -uae | --preconfigured-udn-addresses-enable) ENABLE_PRE_CONF_UDN_ADDR=true + ;; -nqe | --network-qos-enable ) OVN_NETWORK_QOS_ENABLE=true ;; -ha | --ha-enabled ) OVN_HA=true @@ -223,6 +228,7 @@ print_params() { echo "KIND_REMOVE_TAINT = $KIND_REMOVE_TAINT" echo "ENABLE_MULTI_NET = $ENABLE_MULTI_NET" echo "ENABLE_NETWORK_SEGMENTATION = $ENABLE_NETWORK_SEGMENTATION" + echo "ENABLE_PRE_CONF_UDN_ADDR = $ENABLE_PRE_CONF_UDN_ADDR" echo "OVN_NETWORK_QOS_ENABLE = $OVN_NETWORK_QOS_ENABLE" echo "OVN_IMAGE = $OVN_IMAGE" echo "KIND_NUM_MASTER = $KIND_NUM_MASTER" @@ -274,7 +280,12 @@ build_ovn_image() { # Find all built executables, but ignore the 'windows' directory if it exists find ../../go-controller/_output/go/bin/ -maxdepth 1 -type f -exec cp -f {} . \; echo "ref: $(git rev-parse --symbolic-full-name HEAD) commit: $(git rev-parse HEAD)" > git_info - $OCI_BIN build -t "${OVN_IMAGE}" -f Dockerfile.fedora . + $OCI_BIN build \ + --build-arg http_proxy="$http_proxy" \ + --build-arg https_proxy="$https_proxy" \ + --network=host \ + -t "${OVN_IMAGE}" \ + -f Dockerfile.fedora . popd } @@ -416,6 +427,7 @@ helm install ovn-kubernetes . 
-f "${value_file}" \ --set global.enableMulticast=$(if [ "${OVN_MULTICAST_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableMultiNetwork=$(if [ "${ENABLE_MULTI_NET}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableNetworkSegmentation=$(if [ "${ENABLE_NETWORK_SEGMENTATION}" == "true" ]; then echo "true"; else echo "false"; fi) \ + --set global.enablePreconfiguredUDNAddresses=$(if [ "${ENABLE_PRE_CONF_UDN_ADDR}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableHybridOverlay=$(if [ "${OVN_HYBRID_OVERLAY_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.enableObservability=$(if [ "${OVN_OBSERV_ENABLE}" == "true" ]; then echo "true"; else echo "false"; fi) \ --set global.emptyLbEvents=$(if [ "${OVN_EMPTY_LB_EVENTS}" == "true" ]; then echo "true"; else echo "false"; fi) \ diff --git a/contrib/kind.sh b/contrib/kind.sh index 206cf5d942..af1c0f537c 100755 --- a/contrib/kind.sh +++ b/contrib/kind.sh @@ -42,6 +42,8 @@ function setup_kubectl_bin() { # The root cause is unknown, this also can not be reproduced in Ubuntu 20.04 or # with Fedora32 Cloud, but it does not happen if we clean first the ovn-kubernetes resources. delete() { + OCI_BIN=${KIND_EXPERIMENTAL_PROVIDER:-docker} + if [ "$KIND_INSTALL_METALLB" == true ]; then destroy_metallb fi @@ -80,6 +82,7 @@ usage() { echo " [-is | --ipsec]" echo " [-cm | --compact-mode]" echo " [-ic | --enable-interconnect]" + echo " [-uae | --preconfigured-udn-addresses-enable]" echo " [-rae | --enable-route-advertisements]" echo " [-adv | --advertise-default-network]" echo " [-nqe | --network-qos-enable]" @@ -88,73 +91,74 @@ usage() { echo " [-obs | --observability]" echo " [-h]]" echo "" - echo "-cf | --config-file Name of the KIND J2 configuration file." - echo " DEFAULT: ./kind.yaml.j2" - echo "-kt | --keep-taint Do not remove taint components." - echo " DEFAULT: Remove taint components." 
- echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled." - echo "-scm | --separate-cluster-manager Separate cluster manager from ovnkube-master and run as a separate container within ovnkube-master deployment." - echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled." - echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled." - echo "-ds | --disable-snat-multiple-gws Disable SNAT for multiple gws. DEFAULT: Disabled." - echo "-dp | --disable-pkt-mtu-check Disable checking packet size greater than MTU. Default: Disabled" - echo "-df | --disable-forwarding Disable forwarding on OVNK managed interfaces. Default: Disabled" - echo "-ecp | --encap-port UDP port used for geneve overlay. DEFAULT: 6081" - echo "-pl | --install-cni-plugins ] Installs additional CNI network plugins. DEFAULT: Disabled" - echo "-nf | --netflow-targets Comma delimited list of ip:port or :port (using node IP) netflow collectors. DEFAULT: Disabled." - echo "-sf | --sflow-targets Comma delimited list of ip:port or :port (using node IP) sflow collectors. DEFAULT: Disabled." - echo "-if | --ipfix-targets Comma delimited list of ip:port or :port (using node IP) ipfix collectors. DEFAULT: Disabled." - echo "-ifs | --ipfix-sampling Fraction of packets that are sampled and sent to each target collector: 1 packet out of every . DEFAULT: 400 (1 out of 400 packets)." - echo "-ifm | --ipfix-cache-max-flows Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled. DEFAULT: Disabled." - echo "-ifa | --ipfix-cache-active-timeout Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled. DEFAULT: 60." - echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" - echo "-ii | --install-ingress Flag to install Ingress Components." - echo " DEFAULT: Don't install ingress components." 
- echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" - echo "-n4 | --no-ipv4 Disable IPv4. DEFAULT: IPv4 Enabled." - echo "-i6 | --ipv6 Enable IPv6. DEFAULT: IPv6 Disabled." - echo "-wk | --num-workers Number of worker nodes. DEFAULT: HA - 2 worker" - echo " nodes and no HA - 0 worker nodes." - echo "-sw | --allow-system-writes Allow script to update system. Intended to allow" - echo " github CI to be updated with IPv6 settings." - echo " DEFAULT: Don't allow." - echo "-gm | --gateway-mode Enable 'shared' or 'local' gateway mode." - echo " DEFAULT: shared." - echo "-ov | --ovn-image Use the specified docker image instead of building locally. DEFAULT: local build." - echo "-ovr | --ovn-repo Specify the repository to build OVN from" - echo "-ovg | --ovn-gitref Specify the branch, tag or commit id to build OVN from, it can be a pattern like 'branch-*' it will order results and use the first one" - echo "-ml | --master-loglevel Log level for ovnkube (master), DEFAULT: 5." - echo "-nl | --node-loglevel Log level for ovnkube (node), DEFAULT: 5" - echo "-dbl | --dbchecker-loglevel Log level for ovn-dbchecker (ovnkube-db), DEFAULT: 5." - echo "-ndl | --ovn-loglevel-northd Log config for ovn northd, DEFAULT: '-vconsole:info -vfile:info'." - echo "-nbl | --ovn-loglevel-nb Log config for northbound DB DEFAULT: '-vconsole:info -vfile:info'." - echo "-sbl | --ovn-loglevel-sb Log config for southboudn DB DEFAULT: '-vconsole:info -vfile:info'." - echo "-cl | --ovn-loglevel-controller Log config for ovn-controller DEFAULT: '-vconsole:info'." - echo "-lcl | --libovsdb-client-logfile Separate logs for libovsdb client into provided file. DEFAULT: do not separate." - echo "-ep | --experimental-provider Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled." - echo "-eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge." 
- echo "-lr | --local-kind-registry Configure kind to use a local docker registry rather than manually loading images" - echo "-dd | --dns-domain Configure a custom dnsDomain for k8s services, Defaults to 'cluster.local'" - echo "-cn | --cluster-name Configure the kind cluster's name" - echo "-ric | --run-in-container Configure the script to be run from a docker container, allowing it to still communicate with the kind controlplane" - echo "-ehp | --egress-ip-healthcheck-port TCP port used for gRPC session by egress IP node check. DEFAULT: 9107 (Use "0" for legacy dial to port 9)." - echo "-is | --ipsec Enable IPsec encryption (spawns ovn-ipsec pods)" - echo "-sm | --scale-metrics Enable scale metrics" - echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." - echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" - echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled." - echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" - echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." - echo "-mtu Define the overlay mtu" - echo "--isolated Deploy with an isolated environment (no default gateway)" - echo "--delete Delete current cluster" - echo "--deploy Deploy ovn kubernetes without restarting kind" - echo "--add-nodes Adds nodes to an existing cluster. The number of nodes to be added is specified by --num-workers. Also use -ic if the cluster is using interconnect." - echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." - echo "-obs | --observability Enable OVN Observability feature." 
- echo "-rae | --enable-route-advertisements Enable route advertisements" - echo "-adv | --advertise-default-network Applies a RouteAdvertisements configuration to advertise the default network on all nodes" - echo "" +echo "-cf | --config-file Name of the KIND J2 configuration file." +echo " DEFAULT: ./kind.yaml.j2" +echo "-kt | --keep-taint Do not remove taint components." +echo " DEFAULT: Remove taint components." +echo "-ha | --ha-enabled Enable high availability. DEFAULT: HA Disabled." +echo "-scm | --separate-cluster-manager Separate cluster manager from ovnkube-master and run as a separate container within ovnkube-master deployment." +echo "-me | --multicast-enabled Enable multicast. DEFAULT: Disabled." +echo "-ho | --hybrid-enabled Enable hybrid overlay. DEFAULT: Disabled." +echo "-ds | --disable-snat-multiple-gws Disable SNAT for multiple gws. DEFAULT: Disabled." +echo "-dp | --disable-pkt-mtu-check Disable checking packet size greater than MTU. Default: Disabled" +echo "-df | --disable-forwarding Disable forwarding on OVNK managed interfaces. Default: Disabled" +echo "-ecp | --encap-port UDP port used for geneve overlay. DEFAULT: 6081" +echo "-pl | --install-cni-plugins ] Installs additional CNI network plugins. DEFAULT: Disabled" +echo "-nf | --netflow-targets Comma delimited list of ip:port or :port (using node IP) netflow collectors. DEFAULT: Disabled." +echo "-sf | --sflow-targets Comma delimited list of ip:port or :port (using node IP) sflow collectors. DEFAULT: Disabled." +echo "-if | --ipfix-targets Comma delimited list of ip:port or :port (using node IP) ipfix collectors. DEFAULT: Disabled." +echo "-ifs | --ipfix-sampling Fraction of packets that are sampled and sent to each target collector: 1 packet out of every . DEFAULT: 400 (1 out of 400 packets)." +echo "-ifm | --ipfix-cache-max-flows Maximum number of IPFIX flow records that can be cached at a time. If 0, caching is disabled. DEFAULT: Disabled." 
+echo "-ifa | --ipfix-cache-active-timeout Maximum period in seconds for which an IPFIX flow record is cached and aggregated before being sent. If 0, caching is disabled. DEFAULT: 60." +echo "-el | --ovn-empty-lb-events Enable empty-lb-events generation for LB without backends. DEFAULT: Disabled" +echo "-ii | --install-ingress Flag to install Ingress Components." +echo " DEFAULT: Don't install ingress components." +echo "-mlb | --install-metallb Install metallb to test service type LoadBalancer deployments" +echo "-n4 | --no-ipv4 Disable IPv4. DEFAULT: IPv4 Enabled." +echo "-i6 | --ipv6 Enable IPv6. DEFAULT: IPv6 Disabled." +echo "-wk | --num-workers Number of worker nodes. DEFAULT: HA - 2 worker" +echo " nodes and no HA - 0 worker nodes." +echo "-sw | --allow-system-writes Allow script to update system. Intended to allow" +echo " github CI to be updated with IPv6 settings." +echo " DEFAULT: Don't allow." +echo "-gm | --gateway-mode Enable 'shared' or 'local' gateway mode." +echo " DEFAULT: shared." +echo "-ov | --ovn-image Use the specified docker image instead of building locally. DEFAULT: local build." +echo "-ovr | --ovn-repo Specify the repository to build OVN from" +echo "-ovg | --ovn-gitref Specify the branch, tag or commit id to build OVN from, it can be a pattern like 'branch-*' it will order results and use the first one" +echo "-ml | --master-loglevel Log level for ovnkube (master), DEFAULT: 5." +echo "-nl | --node-loglevel Log level for ovnkube (node), DEFAULT: 5" +echo "-dbl | --dbchecker-loglevel Log level for ovn-dbchecker (ovnkube-db), DEFAULT: 5." +echo "-ndl | --ovn-loglevel-northd Log config for ovn northd, DEFAULT: '-vconsole:info -vfile:info'." +echo "-nbl | --ovn-loglevel-nb Log config for northbound DB DEFAULT: '-vconsole:info -vfile:info'." +echo "-sbl | --ovn-loglevel-sb Log config for southbound DB DEFAULT: '-vconsole:info -vfile:info'." +echo "-cl | --ovn-loglevel-controller Log config for ovn-controller DEFAULT: '-vconsole:info'." 
+echo "-lcl | --libovsdb-client-logfile Separate logs for libovsdb client into provided file. DEFAULT: do not separate." +echo "-ep | --experimental-provider Use an experimental OCI provider such as podman, instead of docker. DEFAULT: Disabled." +echo "-eb | --egress-gw-separate-bridge The external gateway traffic uses a separate bridge." +echo "-lr | --local-kind-registry Configure kind to use a local docker registry rather than manually loading images" +echo "-dd | --dns-domain Configure a custom dnsDomain for k8s services, Defaults to 'cluster.local'" +echo "-cn | --cluster-name Configure the kind cluster's name" +echo "-ric | --run-in-container Configure the script to be run from a docker container, allowing it to still communicate with the kind controlplane" +echo "-ehp | --egress-ip-healthcheck-port TCP port used for gRPC session by egress IP node check. DEFAULT: 9107 (Use "0" for legacy dial to port 9)." +echo "-is | --ipsec Enable IPsec encryption (spawns ovn-ipsec pods)" +echo "-sm | --scale-metrics Enable scale metrics" +echo "-cm | --compact-mode Enable compact mode, ovnkube master and node run in the same process." +echo "-ic | --enable-interconnect Enable interconnect with each node as a zone (only valid if OVN_HA is false)" +echo "-nqe | --network-qos-enable Enable network QoS. DEFAULT: Disabled." +echo "--disable-ovnkube-identity Disable per-node cert and ovnkube-identity webhook" +echo "-npz | --nodes-per-zone If interconnect is enabled, number of nodes per zone (Default 1). If this value > 1, then (total k8s nodes (workers + 1) / num of nodes per zone) should be zero." +echo "-mtu Define the overlay mtu" +echo "--isolated Deploy with an isolated environment (no default gateway)" +echo "--delete Delete current cluster" +echo "--deploy Deploy ovn kubernetes without restarting kind" +echo "--add-nodes Adds nodes to an existing cluster. The number of nodes to be added is specified by --num-workers. Also use -ic if the cluster is using interconnect." 
+echo "-dns | --enable-dnsnameresolver Enable DNSNameResolver for resolving the DNS names used in the DNS rules of EgressFirewall." +echo "-obs | --observability Enable OVN Observability feature." +echo "-uae | --preconfigured-udn-addresses-enable Enable connecting workloads with preconfigured network to user-defined networks" +echo "-rae | --enable-route-advertisements Enable route advertisements" +echo "-adv | --advertise-default-network Applies a RouteAdvertisements configuration to advertise the default network on all nodes" +echo "" } parse_args() { @@ -337,6 +341,8 @@ parse_args() { ;; -nse | --network-segmentation-enable) ENABLE_NETWORK_SEGMENTATION=true ;; + -uae | --preconfigured-udn-addresses-enable) ENABLE_PRE_CONF_UDN_ADDR=true + ;; -rae | --route-advertisements-enable) ENABLE_ROUTE_ADVERTISEMENTS=true ;; -adv | --advertise-default-network) ADVERTISE_DEFAULT_NETWORK=true @@ -434,6 +440,7 @@ print_params() { echo "ENABLE_NETWORK_SEGMENTATION= $ENABLE_NETWORK_SEGMENTATION" echo "ENABLE_ROUTE_ADVERTISEMENTS= $ENABLE_ROUTE_ADVERTISEMENTS" echo "ADVERTISE_DEFAULT_NETWORK = $ADVERTISE_DEFAULT_NETWORK" + echo "ENABLE_PRE_CONF_UDN_ADDR = $ENABLE_PRE_CONF_UDN_ADDR" echo "OVN_ENABLE_INTERCONNECT = $OVN_ENABLE_INTERCONNECT" if [ "$OVN_ENABLE_INTERCONNECT" == true ]; then echo "KIND_NUM_NODES_PER_ZONE = $KIND_NUM_NODES_PER_ZONE" @@ -452,8 +459,8 @@ print_params() { install_jinjanator_renderer() { # ensure jinjanator renderer installed - pip install wheel --user - pip freeze | grep jinjanator || pip install jinjanator[yaml] --user + pipx install jinjanator[yaml] + pipx ensurepath --force >/dev/null export PATH=~/.local/bin:$PATH } @@ -492,11 +499,11 @@ check_dependencies() { fi if ! command_exists jinjanate ; then - if ! command_exists pip ; then - echo "Dependency not met: 'jinjanator' not installed and cannot install with 'pip'" + if ! 
command_exists pipx ; then + echo "Dependency not met: 'jinjanator' not installed and cannot install with 'pipx'" exit 1 fi - echo "'jinjanate' not found, installing with 'pip'" + echo "'jinjanate' not found, installing with 'pipx'" install_jinjanator_renderer fi @@ -504,6 +511,11 @@ check_dependencies() { echo "Dependency not met: Neither docker nor podman found" exit 1 fi + + if command_exists podman && ! command_exists skopeo; then + echo "Dependency not met: skopeo not installed. Run the following command to install it: 'sudo dnf install skopeo'" + exit 1 + fi } OPENSSL="" @@ -649,6 +661,11 @@ set_default_params() { fi ENABLE_MULTI_NET=${ENABLE_MULTI_NET:-false} ENABLE_NETWORK_SEGMENTATION=${ENABLE_NETWORK_SEGMENTATION:-false} + if [ "$ENABLE_NETWORK_SEGMENTATION" == true ] && [ "$ENABLE_MULTI_NET" != true ]; then + echo "Network segmentation (UDN) requires multi-network to be enabled (-mne)" + exit 1 + fi + ENABLE_ROUTE_ADVERTISEMENTS=${ENABLE_ROUTE_ADVERTISEMENTS:-false} if [ "$ENABLE_ROUTE_ADVERTISEMENTS" == true ] && [ "$ENABLE_MULTI_NET" != true ]; then echo "Route advertisements requires multi-network to be enabled (-mne)" @@ -658,6 +675,16 @@ set_default_params() { echo "Route advertisements requires interconnect to be enabled (-ic)" exit 1 fi + + ENABLE_PRE_CONF_UDN_ADDR=${ENABLE_PRE_CONF_UDN_ADDR:-false} + if [[ $ENABLE_PRE_CONF_UDN_ADDR == true && $ENABLE_NETWORK_SEGMENTATION != true ]]; then + echo "Preconfigured UDN addresses requires network-segmentation to be enabled (-nse)" + exit 1 + fi + if [[ $ENABLE_PRE_CONF_UDN_ADDR == true && $OVN_ENABLE_INTERCONNECT != true ]]; then + echo "Preconfigured UDN addresses requires interconnect to be enabled (-ic)" + exit 1 + fi ADVERTISE_DEFAULT_NETWORK=${ADVERTISE_DEFAULT_NETWORK:-false} OVN_COMPACT_MODE=${OVN_COMPACT_MODE:-false} if [ "$OVN_COMPACT_MODE" == true ]; then @@ -822,6 +849,12 @@ set_ovn_image() { } build_ovn_image() { + local push_args="" + if [ "$OCI_BIN" == "podman" ]; then + # docker doesn't 
perform tls check by default only podman does, hence we need to disable it for podman. + push_args="--tls-verify=false" + fi + if [ "$OVN_IMAGE" == local ]; then set_ovn_image @@ -834,22 +867,28 @@ build_ovn_image() { # store in local registry if [ "$KIND_LOCAL_REGISTRY" == true ];then echo "Pushing built image to local $OCI_BIN registry" - $OCI_BIN push "${OVN_IMAGE}" + $OCI_BIN push $push_args "$OVN_IMAGE" fi # We should push to local registry if image is not remote elif [ "${OVN_IMAGE}" != "" -a "${KIND_LOCAL_REGISTRY}" == true ] && (echo "$OVN_IMAGE" | grep / -vq); then local local_registry_ovn_image="localhost:5000/${OVN_IMAGE}" $OCI_BIN tag "$OVN_IMAGE" $local_registry_ovn_image OVN_IMAGE=$local_registry_ovn_image - $OCI_BIN push $OVN_IMAGE + $OCI_BIN push $push_args "$OVN_IMAGE" fi } create_ovn_kube_manifests() { local ovnkube_image=${OVN_IMAGE} if [ "$KIND_LOCAL_REGISTRY" == true ];then - # When updating with local registry we have to reference the sha - ovnkube_image=$($OCI_BIN inspect --format='{{index .RepoDigests 0}}' $OVN_IMAGE) + # When updating with local registry we have to reference the image digest (SHA) + # Check the image digest in the local registry because it might be different then the digest in the local container runtime + if [ "$OCI_BIN" == "podman" ]; then + # due to differences how podman and docker persist images, for podman use skopeo to get the image and digest. 
+ ovnkube_image=$(skopeo inspect --format "{{.Name}}@{{.Digest}}" --tls-verify=false "docker://$OVN_IMAGE") + else + ovnkube_image=$($OCI_BIN inspect --format='{{index .RepoDigests 0}}' $OVN_IMAGE) + fi fi pushd ${DIR}/../dist/images if [ "$OVN_ENABLE_INTERCONNECT" == true ]; then @@ -899,6 +938,7 @@ create_ovn_kube_manifests() { --ex-gw-network-interface="${OVN_EX_GW_NETWORK_INTERFACE}" \ --multi-network-enable="${ENABLE_MULTI_NET}" \ --network-segmentation-enable="${ENABLE_NETWORK_SEGMENTATION}" \ + --preconfigured-udn-addresses-enable="${ENABLE_PRE_CONF_UDN_ADDR}" \ --route-advertisements-enable="${ENABLE_ROUTE_ADVERTISEMENTS}" \ --advertise-default-network="${ADVERTISE_DEFAULT_NETWORK}" \ --ovnkube-metrics-scale-enable="${OVN_METRICS_SCALE_ENABLE}" \ diff --git a/dist/images/Dockerfile.fedora b/dist/images/Dockerfile.fedora index e1789bd1e5..fc42191887 100644 --- a/dist/images/Dockerfile.fedora +++ b/dist/images/Dockerfile.fedora @@ -14,7 +14,7 @@ ARG OVN_FROM=koji ############################################# # Stage to get OVN and OVS RPMs from source # ############################################# -FROM fedora:41 AS ovnbuilder +FROM quay.io/fedora/fedora:42 AS ovnbuilder USER root @@ -24,14 +24,28 @@ ENV PYTHONDONTWRITEBYTECODE yes RUN INSTALL_PKGS="git rpm-build dnf-plugins-core" && \ dnf install --best --refresh -y --setopt=tsflags=nodocs $INSTALL_PKGS +# Clone OVN Source Code. +ARG OVN_REPO=https://github.com/ovn-org/ovn.git +ARG OVN_GITREF=main +WORKDIR /root +RUN mkdir ovn && pushd ovn && \ + git init && \ + git remote add origin $OVN_REPO && \ + git fetch origin ${OVN_GITREF} --depth 1 && \ + git reset --hard FETCH_HEAD && \ + popd + # Clone OVS Source Code. ARG OVS_REPO=https://github.com/openvswitch/ovs.git -ARG OVS_GITREF=branch-* +# OVS_GITREF can be set to a specific commit or branch, otherwise the version pinned by OVN will be used. 
+ARG OVS_GITREF="" WORKDIR /root -RUN mkdir ovs && pushd ovs && \ +RUN OVS_OVN_GITREF=$(cd ovn && git submodule status ovs|cut -c 2-|cut -d ' ' -f 1) && \ + mkdir ovs && pushd ovs && \ git init && \ git remote add origin $OVS_REPO && \ - git fetch $OVS_REPO $(git ls-remote origin "${OVS_GITREF}" | sort -V -k2 | tail -1 | awk '{print $1}') --depth 1 && \ + OVS_GITREF="${OVS_GITREF:-$OVS_OVN_GITREF}" && \ + git fetch $OVS_REPO ${OVS_GITREF} --depth 1 && \ git reset --hard FETCH_HEAD && \ echo "1" && \ find rhel && \ @@ -48,16 +62,6 @@ RUN rm rpm/rpmbuild/RPMS/x86_64/*debug* RUN rm rpm/rpmbuild/RPMS/x86_64/*devel* RUN git log -n 1 -# Clone OVN Source Code. -ARG OVN_REPO=https://github.com/ovn-org/ovn.git -ARG OVN_GITREF=main -WORKDIR /root -RUN mkdir ovn && pushd ovn && \ - git init && \ - git remote add origin $OVN_REPO && \ - git fetch origin ${OVN_GITREF} --depth 1 && \ - git reset --hard FETCH_HEAD && \ - popd # Build OVN rpms. WORKDIR /root/ovn/ @@ -74,8 +78,8 @@ RUN git log -n 1 ######################################## # Stage to download OVN RPMs from koji # ######################################## -FROM fedora:41 AS kojidownloader -ARG ovnver=ovn-24.09.2-71.fc41 +FROM quay.io/fedora/fedora:42 AS kojidownloader +ARG ovnver=ovn-25.03.1-42.fc42 USER root @@ -95,14 +99,14 @@ RUN if [ "$TARGETPLATFORM" = "linux/amd64" ] || [ -z "$TARGETPLATFORM"] ; then k ###################################### # Stage to copy OVN RPMs from source # ###################################### -FROM fedora:41 AS source +FROM quay.io/fedora/fedora:42 AS source COPY --from=ovnbuilder /root/ovn/rpm/rpmbuild/RPMS/x86_64/*.rpm / COPY --from=ovnbuilder /root/ovs/rpm/rpmbuild/RPMS/x86_64/*.rpm / #################################### # Stage to copy OVN RPMs from koji # #################################### -FROM fedora:41 AS koji +FROM quay.io/fedora/fedora:42 AS koji COPY --from=kojidownloader /*.rpm / diff --git a/dist/images/Dockerfile.ubuntu b/dist/images/Dockerfile.ubuntu index 
10addc57d4..7fedefa624 100644 --- a/dist/images/Dockerfile.ubuntu +++ b/dist/images/Dockerfile.ubuntu @@ -8,14 +8,12 @@ # # So this file will change over time. -FROM ubuntu:24.10 +FROM ubuntu:25.04 USER root RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux nftables -RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - - # Install OVS and OVN packages. RUN apt-get update && apt-get install -y openvswitch-switch openvswitch-common ovn-central ovn-common ovn-host diff --git a/dist/images/Dockerfile.ubuntu.arm64 b/dist/images/Dockerfile.ubuntu.arm64 index 48a408b036..3830641cf0 100644 --- a/dist/images/Dockerfile.ubuntu.arm64 +++ b/dist/images/Dockerfile.ubuntu.arm64 @@ -8,14 +8,12 @@ # # So this file will change over time. -FROM ubuntu:24.10 +FROM ubuntu:25.04 USER root RUN apt-get update && apt-get install -y iproute2 curl software-properties-common util-linux nftables -RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - - # Install OVS and OVN packages. 
RUN apt-get update && apt-get install -y openvswitch-switch openvswitch-common ovn-central ovn-common ovn-host diff --git a/dist/images/daemonset.sh b/dist/images/daemonset.sh index 95e4a503e8..0613a37238 100755 --- a/dist/images/daemonset.sh +++ b/dist/images/daemonset.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash #set -x #Always exit on errors @@ -71,6 +71,7 @@ OVN_EGRESSSERVICE_ENABLE= OVN_DISABLE_OVN_IFACE_ID_VER="false" OVN_MULTI_NETWORK_ENABLE= OVN_NETWORK_SEGMENTATION_ENABLE= +OVN_PRE_CONF_UDN_ADDR_ENABLE= OVN_ROUTE_ADVERTISEMENTS_ENABLE= OVN_ADVERTISE_DEFAULT_NETWORK= OVN_V4_JOIN_SUBNET="" @@ -273,6 +274,9 @@ while [ "$1" != "" ]; do --network-segmentation-enable) OVN_NETWORK_SEGMENTATION_ENABLE=$VALUE ;; + --preconfigured-udn-addresses-enable) + OVN_PRE_CONF_UDN_ADDR_ENABLE=$VALUE + ;; --route-advertisements-enable) OVN_ROUTE_ADVERTISEMENTS_ENABLE=$VALUE ;; @@ -468,6 +472,8 @@ ovn_multi_network_enable=${OVN_MULTI_NETWORK_ENABLE} echo "ovn_multi_network_enable: ${ovn_multi_network_enable}" ovn_network_segmentation_enable=${OVN_NETWORK_SEGMENTATION_ENABLE} echo "ovn_network_segmentation_enable: ${ovn_network_segmentation_enable}" +ovn_pre_conf_udn_addr_enable=${OVN_PRE_CONF_UDN_ADDR_ENABLE} +echo "ovn_pre_conf_udn_addr_enable: ${ovn_pre_conf_udn_addr_enable}" ovn_route_advertisements_enable=${OVN_ROUTE_ADVERTISEMENTS_ENABLE} echo "ovn_route_advertisements_enable: ${ovn_route_advertisements_enable}" ovn_advertise_default_network=${OVN_ADVERTISE_DEFAULT_NETWORK} @@ -612,6 +618,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_ip_healthcheck_port=${ovn_egress_ip_healthcheck_port} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ @@ -814,6 +821,7 @@ 
ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ @@ -894,6 +902,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_egress_service_enable=${ovn_egress_service_enable} \ ovn_ssl_en=${ovn_ssl_en} \ @@ -961,6 +970,7 @@ ovn_image=${ovnkube_image} \ ovn_egress_qos_enable=${ovn_egress_qos_enable} \ ovn_multi_network_enable=${ovn_multi_network_enable} \ ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ + ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ ovn_ssl_en=${ovn_ssl_en} \ ovn_remote_probe_interval=${ovn_remote_probe_interval} \ @@ -1057,6 +1067,7 @@ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ jinjanate ../templates/rbac-ovnkube-node.yaml.j2 -o ${output_dir}/rbac-ovnkube-node.yaml ovn_network_segmentation_enable=${ovn_network_segmentation_enable} \ +ovn_pre_conf_udn_addr_enable=${ovn_pre_conf_udn_addr_enable} \ ovn_enable_dnsnameresolver=${ovn_enable_dnsnameresolver} \ ovn_route_advertisements_enable=${ovn_route_advertisements_enable} \ jinjanate ../templates/rbac-ovnkube-cluster-manager.yaml.j2 -o ${output_dir}/rbac-ovnkube-cluster-manager.yaml diff --git a/dist/images/ovn-config.sh b/dist/images/ovn-config.sh index 69e2c6471c..42e0e1253a 100755 --- a/dist/images/ovn-config.sh +++ 
b/dist/images/ovn-config.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # run on master to configure ovn-kubernetes # The /etc/openvswitch/ovn_k8s.conf and /etc/sysconfig/ovn-kubernetes diff --git a/dist/images/ovn-run.sh b/dist/images/ovn-run.sh index 9f5acfcdca..d684547434 100755 --- a/dist/images/ovn-run.sh +++ b/dist/images/ovn-run.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # run the ovs-vswitchd daemon from a container diff --git a/dist/images/ovndb-raft-functions.sh b/dist/images/ovndb-raft-functions.sh index 4d6e124f2d..8737ca3b50 100644 --- a/dist/images/ovndb-raft-functions.sh +++ b/dist/images/ovndb-raft-functions.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash #set -euo pipefail verify-ovsdb-raft() { diff --git a/dist/images/ovnkube.sh b/dist/images/ovnkube.sh index ae77d2f13b..e016ce4a47 100755 --- a/dist/images/ovnkube.sh +++ b/dist/images/ovnkube.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash #set -euo pipefail # Enable verbose shell output if OVNKUBE_SH_VERBOSE is set to 'true' @@ -269,6 +269,8 @@ ovn_disable_ovn_iface_id_ver=${OVN_DISABLE_OVN_IFACE_ID_VER:-false} ovn_multi_network_enable=${OVN_MULTI_NETWORK_ENABLE:-false} #OVN_NETWORK_SEGMENTATION_ENABLE - enable user defined primary networks for ovn-kubernetes ovn_network_segmentation_enable=${OVN_NETWORK_SEGMENTATION_ENABLE:=false} +#OVN_PRE_CONF_UDN_ADDR_ENABLE - enable connecting workloads with custom network configuration to UDNs +ovn_pre_conf_udn_addr_enable=${OVN_PRE_CONF_UDN_ADDR_ENABLE:=false} #OVN_NROUTE_ADVERTISEMENTS_ENABLE - enable route advertisements for ovn-kubernetes ovn_route_advertisements_enable=${OVN_ROUTE_ADVERTISEMENTS_ENABLE:=false} ovn_acl_logging_rate_limit=${OVN_ACL_LOGGING_RATE_LIMIT:-"20"} @@ -324,6 +326,16 @@ ovn_nohostsubnet_label=${OVN_NOHOSTSUBNET_LABEL:-""} # should be set to true when dpu nodes are in the cluster ovn_disable_requestedchassis=${OVN_DISABLE_REQUESTEDCHASSIS:-false} +# external_ids:host-k8s-nodename is set on an Open_vSwitch 
enabled system if the ovnkube stack +# should function on behalf of a different host than external_ids:hostname. This includes +# all the components that belong in an ovnkube stack (i.e. NB DB, SB DB, ovnkube etc) +# overwrite the K8S_NODE env var with the one found within the OVS metadata in this case +ovn_k8s_node=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:host-k8s-nodename | tr -d '\"') +if [[ ! -z $ovn_k8s_node ]]; then + echo "host-k8s-nodename is set, overriding K8S_NODE with $ovn_k8s_node" + K8S_NODE=$ovn_k8s_node +fi + # Determine the ovn rundir. if [[ -f /usr/bin/ovn-appctl ]]; then # ovn-appctl is present. Use new ovn run dir path. @@ -977,6 +989,11 @@ local-nb-ovsdb() { wait_for_event attempts=3 process_ready ovnnb_db echo "=============== nb-ovsdb (unix sockets only) ========== RUNNING" + [[ "local" == "${OVN_GATEWAY_MODE}" && "true" == "${OVN_ROUTE_ADVERTISEMENTS_ENABLE}" ]] && { + ovn-nbctl set NB_Global . options:use_ct_inv_match=false + echo "=============== nb-ovsdb ========== reconfigured for route advertisements" + } + # Let ovn-northd sleep and not use so much CPU ovn-nbctl set NB_Global . 
options:northd-backoff-interval-ms=${ovn_northd_backoff_interval} echo "=============== nb-ovsdb ========== reconfigured for northd backoff" @@ -1259,7 +1276,7 @@ ovn-master() { ovnkube_metrics_scale_enable_flag="--metrics-enable-scale --metrics-enable-pprof" fi echo "ovnkube_metrics_scale_enable_flag: ${ovnkube_metrics_scale_enable_flag}" - + ovn_stateless_netpol_enable_flag= if [[ ${ovn_stateless_netpol_enable} == "true" ]]; then ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" @@ -1283,7 +1300,7 @@ ovn-master() { ovn_observ_enable_flag="--enable-observability" fi echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" - + nohostsubnet_label_option= if [[ ${ovn_nohostsubnet_label} != "" ]]; then nohostsubnet_label_option="--no-hostsubnet-nodes=${ovn_nohostsubnet_label}" @@ -1356,6 +1373,7 @@ ovn-master() { ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ ${nohostsubnet_label_option} \ + ${ovn_stateless_netpol_enable_flag} \ ${ovn_disable_requestedchassis_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ @@ -1528,6 +1546,12 @@ ovnkube-controller() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + echo "pre_conf_udn_addr_enable_flag=${pre_conf_udn_addr_enable_flag}" + route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -1626,6 +1650,13 @@ ovnkube-controller() { fi echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + + ovn_stateless_netpol_enable_flag= + if [[ ${ovn_stateless_netpol_enable} == "true" ]]; then + ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" + fi + echo "ovn_stateless_netpol_enable_flag: 
${ovn_stateless_netpol_enable_flag}" + echo "=============== ovnkube-controller ========== MASTER ONLY" /usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -1641,6 +1672,7 @@ ovnkube-controller() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${ovn_acl_logging_rate_limit_flag} \ ${ovn_dbs} \ @@ -1825,6 +1857,12 @@ ovnkube-controller-with-node() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + echo "pre_conf_udn_addr_enable_flag=${pre_conf_udn_addr_enable_flag}" + route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -1943,7 +1981,7 @@ ovnkube-controller-with-node() { if test -z "${OVN_UNPRIVILEGED_MODE+x}" -o "x${OVN_UNPRIVILEGED_MODE}" = xno; then ovn_unprivileged_flag="" fi - + ovn_metrics_bind_address="${metrics_endpoint_ip}:${metrics_bind_port}" metrics_bind_address="${metrics_endpoint_ip}:${metrics_worker_port}" echo "ovn_metrics_bind_address=${ovn_metrics_bind_address}" @@ -2054,6 +2092,17 @@ ovnkube-controller-with-node() { fi echo "ovn_observ_enable_flag=${ovn_observ_enable_flag}" + ovn_stateless_netpol_enable_flag= + if [[ ${ovn_stateless_netpol_enable} == "true" ]]; then + ovn_stateless_netpol_enable_flag="--enable-stateless-netpol" + fi + + ovn_disable_requestedchassis_flag= + if [[ ${ovn_disable_requestedchassis} == "true" ]]; then + ovn_disable_requestedchassis_flag="--disable-requestedchassis" + fi + echo "ovn_disable_requestedchassis_flag=${ovn_disable_requestedchassis_flag}" + echo "=============== ovnkube-controller-with-node --init-ovnkube-controller-with-node==========" 
/usr/bin/ovnkube --init-ovnkube-controller ${K8S_NODE} --init-node ${K8S_NODE} \ ${anp_enabled_flag} \ @@ -2079,6 +2128,7 @@ ovnkube-controller-with-node() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${netflow_targets} \ ${ofctrl_wait_before_clear} \ @@ -2106,6 +2156,7 @@ ovnkube-controller-with-node() { ${ssl_opts} \ ${network_qos_enabled_flag} \ ${ovn_enable_dnsnameresolver_flag} \ + ${ovn_disable_requestedchassis_flag} \ --cluster-subnets ${net_cidr} --k8s-service-cidr=${svc_cidr} \ --export-ovs-metrics \ --gateway-mode=${ovn_gateway_mode} ${ovn_gateway_opts} \ @@ -2123,7 +2174,6 @@ ovnkube-controller-with-node() { --nodeport \ --ovn-metrics-bind-address ${ovn_metrics_bind_address} \ --pidfile ${OVN_RUNDIR}/ovnkube-controller-with-node.pid \ - --disable-udn-host-isolation \ --zone ${ovn_zone} & wait_for_event attempts=3 process_ready ovnkube-controller-with-node @@ -2246,6 +2296,12 @@ ovn-cluster-manager() { fi echo "network_segmentation_enabled_flag=${network_segmentation_enabled_flag}" + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + echo "pre_conf_udn_addr_enable_flag=${pre_conf_udn_addr_enable_flag}" + route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -2313,6 +2369,7 @@ ovn-cluster-manager() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${persistent_ips_enabled_flag} \ ${ovnkube_enable_interconnect_flag} \ @@ -2399,8 +2456,15 @@ ovn-node() { wait_for_event ovs_ready fi - echo "=============== ovn-node - (wait for ready_to_start_node)" - wait_for_event 
ready_to_start_node + if [[ ${ovnkube_node_mode} == "dpu-host" ]] && [[ ${ovn_enable_interconnect} == "true" ]]; then + # ready_to_start_node checks for the NB/SB readiness state. + # This is not available on the DPU host when interconnect is enabled, + # because the DBs will run locally on the DPU + echo "skipping ready_to_start_node on DPU Host and when interconnect is true" + else + echo "=============== ovn-node - (wait for ready_to_start_node)" + wait_for_event ready_to_start_node + fi echo "ovn_nbdb ${ovn_nbdb} ovn_sbdb ${ovn_sbdb} ovn_nbdb_conn ${ovn_nbdb_conn}" @@ -2483,6 +2547,11 @@ ovn-node() { network_segmentation_enabled_flag="--enable-multi-network --enable-network-segmentation" fi + pre_conf_udn_addr_enable_flag= + if [[ ${ovn_pre_conf_udn_addr_enable} == "true" ]]; then + pre_conf_udn_addr_enable_flag="--enable-preconfigured-udn-addresses" + fi + route_advertisements_enabled_flag= if [[ ${ovn_route_advertisements_enable} == "true" ]]; then route_advertisements_enabled_flag="--enable-route-advertisements" @@ -2578,12 +2647,6 @@ ovn-node() { fi if [[ ${ovnkube_node_mode} == "dpu" ]]; then - # in the case of dpu mode we want the host K8s Node Name and not the DPU K8s Node Name - K8S_NODE=$(ovs-vsctl --if-exists get Open_vSwitch . external_ids:host-k8s-nodename | tr -d '\"') - if [[ ${K8S_NODE} == "" ]]; then - echo "Couldn't get the required Host K8s Nodename. Exiting..." - exit 1 - fi if [[ ${ovn_gateway_opts} == "" ]]; then # get the gateway interface gw_iface=$(ovs-vsctl --if-exists get Open_vSwitch . 
external_ids:ovn-gw-interface | tr -d \") @@ -2724,6 +2787,7 @@ ovn-node() { ${multicast_enabled_flag} \ ${multi_network_enabled_flag} \ ${network_segmentation_enabled_flag} \ + ${pre_conf_udn_addr_enable_flag} \ ${route_advertisements_enabled_flag} \ ${netflow_targets} \ ${ofctrl_wait_before_clear} \ @@ -2761,7 +2825,6 @@ ovn-node() { --nodeport \ --ovn-metrics-bind-address ${ovn_metrics_bind_address} \ --pidfile ${OVN_RUNDIR}/ovnkube.pid \ - --disable-udn-host-isolation \ --zone ${ovn_zone} & wait_for_event attempts=3 process_ready ovnkube diff --git a/dist/images/push_manifest.sh b/dist/images/push_manifest.sh index f82531df8d..f42c8c30f9 100755 --- a/dist/images/push_manifest.sh +++ b/dist/images/push_manifest.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Currently supported platforms of multi-arch images are: amd64 arm64 LINUX_ARCH=(amd64 arm64) diff --git a/dist/install-ovn-k8s.sh b/dist/install-ovn-k8s.sh index 7ffdc0ac0e..d4b72e8e39 100755 --- a/dist/install-ovn-k8s.sh +++ b/dist/install-ovn-k8s.sh @@ -1,4 +1,4 @@ -#!/bin/bash -ex +#!/usr/bin/env -S bash -ex # shellcheck disable=SC2016 SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}") diff --git a/dist/templates/ovn-setup.yaml.j2 b/dist/templates/ovn-setup.yaml.j2 index 8112e06670..981a362859 100644 --- a/dist/templates/ovn-setup.yaml.j2 +++ b/dist/templates/ovn-setup.yaml.j2 @@ -89,7 +89,9 @@ spec: networkSelectors: - networkSelectionType: DefaultNetwork nodeSelector: {} - frrConfigurationSelector: {} + frrConfigurationSelector: + matchLabels: + name: receive-all advertisements: - "PodNetwork" {%- endif %} diff --git a/dist/templates/ovnkube-control-plane.yaml.j2 b/dist/templates/ovnkube-control-plane.yaml.j2 index 2373f38cff..13403ceb31 100644 --- a/dist/templates/ovnkube-control-plane.yaml.j2 +++ b/dist/templates/ovnkube-control-plane.yaml.j2 @@ -146,6 +146,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: 
OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE value: "{{ ovn_route_advertisements_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR diff --git a/dist/templates/ovnkube-node.yaml.j2 b/dist/templates/ovnkube-node.yaml.j2 index 98591a5ac1..2bf8e5ed2a 100644 --- a/dist/templates/ovnkube-node.yaml.j2 +++ b/dist/templates/ovnkube-node.yaml.j2 @@ -255,6 +255,10 @@ spec: - name: OVNKUBE_NODE_MODE value: "dpu" {% endif -%} + {% if ovnkube_app_name!="ovnkube-node-dpu" and ovnkube_app_name!="ovnkube-node-dpu-host" -%} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" + {% endif -%} - name: OVNKUBE_NODE_MGMT_PORT_NETDEV value: "{{ ovnkube_node_mgmt_port_netdev }}" - name: OVN_HOST_NETWORK_NAMESPACE diff --git a/dist/templates/ovnkube-single-node-zone.yaml.j2 b/dist/templates/ovnkube-single-node-zone.yaml.j2 index d2d485cca7..ad4e4488f9 100644 --- a/dist/templates/ovnkube-single-node-zone.yaml.j2 +++ b/dist/templates/ovnkube-single-node-zone.yaml.j2 @@ -79,6 +79,10 @@ spec: value: "{{ ovn_loglevel_nb }}" - name: OVN_NORTHD_BACKOFF_INTERVAL value: "{{ ovn_northd_backoff_interval }}" + - name: OVN_GATEWAY_MODE + value: "{{ ovn_gateway_mode }}" + - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE + value: "{{ ovn_route_advertisements_enable }}" - name: K8S_APISERVER valueFrom: configMapKeyRef: @@ -433,6 +437,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE value: "{{ ovn_route_advertisements_enable }}" - name: OVNKUBE_NODE_MGMT_PORT_NETDEV diff --git a/dist/templates/ovnkube-zone-controller.yaml.j2 b/dist/templates/ovnkube-zone-controller.yaml.j2 index 363ade3014..cc87fe1a53 100644 --- a/dist/templates/ovnkube-zone-controller.yaml.j2 +++ 
b/dist/templates/ovnkube-zone-controller.yaml.j2 @@ -345,6 +345,8 @@ spec: value: "{{ ovn_multi_network_enable }}" - name: OVN_NETWORK_SEGMENTATION_ENABLE value: "{{ ovn_network_segmentation_enable }}" + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: "{{ ovn_pre_conf_udn_addr_enable }}" - name: OVN_ROUTE_ADVERTISEMENTS_ENABLE value: "{{ ovn_route_advertisements_enable }}" - name: OVN_HYBRID_OVERLAY_NET_CIDR diff --git a/docs/developer-guide/developer.md b/docs/developer-guide/developer.md index d67bd62a9d..3dc7d0dfb8 100644 --- a/docs/developer-guide/developer.md +++ b/docs/developer-guide/developer.md @@ -5,11 +5,12 @@ This file aims to have information that is useful to the people contributing to ## Generating ovsdb bindings using modelgen In order to generate the latest NBDB and SBDB bindings, we have a tool called `modelgen` -which lives in the libovsdb repo: https://github.com/ovn-org/libovsdb#modelgen. It is a +which lives in the libovsdb repo: https://github.com/ovn-kubernetes/libovsdb#modelgen. It is a [code generator](https://go.dev/blog/generate) that uses `pkg/nbdb/gen.go` and `pkg/sbdb/gen.go` files to auto-generate the models and additional code like deep-copy methods. In order to use this tool do the following: + ``` $ cd go-controller/ $ make modelgen diff --git a/docs/features/hardware-offload/derive-from-mgmt-port.md b/docs/features/hardware-offload/derive-from-mgmt-port.md new file mode 100644 index 0000000000..e7d7f38194 --- /dev/null +++ b/docs/features/hardware-offload/derive-from-mgmt-port.md @@ -0,0 +1,163 @@ +# From PCI Address Gateway Interface Feature + +## Overview + +The "derive-from-mgmt-port" gateway interface feature is a new capability in OVN-Kubernetes that enables automatic gateway interface resolution in DPU (Data Processing Unit) host mode deployments. 
This feature automatically discovers and configures the appropriate Physical Function (PF) interface as the gateway interface based on the Virtual Function (VF) used for the management port. + +## Problem Statement + +In DPU deployments, the host typically has access to Virtual Functions (VFs) for management purposes, while the Physical Functions (PFs) are used for external connectivity. Previously, administrators had to manually specify the gateway interface, which required: + +1. Knowledge of the hardware topology +2. Manual mapping of VF to PF relationships +3. Configuration updates when hardware changes +4. Potential for misconfiguration + +## Solution + +The "derive-from-mgmt-port" feature automates the gateway interface discovery process by: + +1. **Automatic Discovery**: Automatically finds the PF interface associated with the management port VF +2. **Hardware Abstraction**: Eliminates the need for manual hardware topology knowledge +3. **Dynamic Configuration**: Adapts to hardware changes automatically +4. **Reduced Configuration**: Simplifies deployment configuration + +## Benefits + +### For Administrators + +- **Simplified Configuration**: No need to manually specify gateway interfaces +- **Reduced Errors**: Eliminates manual mapping errors +- **Hardware Agnostic**: Works with any SR-IOV capable hardware +- **Dynamic Adaptation**: Automatically adapts to hardware changes + +### For Operations + +- **Faster Deployment**: Reduced configuration time +- **Consistent Setup**: Standardized gateway interface selection +- **Reduced Maintenance**: Less manual intervention required +- **Better Reliability**: Fewer configuration-related issues + +### For Development + +- **Cleaner Code**: Centralized gateway interface logic +- **Better Testing**: Comprehensive unit test coverage +- **Extensible Design**: Foundation for future enhancements + +## Technical Implementation + +### Code Changes + +1. 
**New Constant**: Added `DeriveFromMgmtPort = "derive-from-mgmt-port"` constant in `go-controller/pkg/types/const.go` +2. **Enhanced Logic**: Extended gateway initialization in `go-controller/pkg/node/default_node_network_controller.go` +3. **Comprehensive Testing**: Added unit tests covering success and failure scenarios + +### Key Functions + +- `getManagementPortNetDev()`: Resolves management port device name +- `GetPciFromNetDevice()`: Retrieves PCI address from network device +- `GetPfPciFromVfPci()`: Resolves PF PCI address from VF PCI address +- `GetNetDevicesFromPci()`: Discovers network devices associated with PCI address + +### Error Handling + +The implementation includes robust error handling for: +- Missing network devices +- PCI address resolution failures +- SR-IOV operation failures +- Hardware compatibility issues + +## Configuration Examples + +### Basic Configuration + +```bash +--ovnkube-node-mode=dpu-host +--ovnkube-node-mgmt-port-netdev=pf0vf0 +--gateway-interface=derive-from-mgmt-port +``` + +### Helm Configuration + +```yaml +ovnkube-node: + mode: dpu-host + mgmtPortNetdev: pf0vf0 + +gateway: + interface: derive-from-mgmt-port +``` + +### Configuration File + +```ini +[OvnKubeNode] +mode=dpu-host +mgmt-port-netdev=pf0vf0 + +[Gateway] +interface=derive-from-mgmt-port +``` + +## Migration Guide + +### From Manual Configuration + +**Before:** +```bash +--gateway-interface=eth0 +``` + +**After:** +```bash +--gateway-interface=derive-from-mgmt-port +``` + +### Verification Steps + +1. Verify SR-IOV configuration is correct +2. Ensure management port device is properly configured +3. Check that PF interfaces are available +4. 
Monitor logs for successful gateway interface resolution + +## Testing + +### Unit Tests + +Comprehensive unit tests cover: +- Successful gateway interface resolution +- Error handling for missing devices +- PCI address resolution failures +- Network device discovery failures + +### Integration Tests + +The feature integrates with existing: +- Gateway initialization +- DPU host mode functionality +- SR-IOV operations +- Network configuration + +## Future Enhancements + +Potential improvements include: +- Support for multiple gateway interfaces +- Enhanced device selection criteria +- Integration with device plugins +- Support for non-SR-IOV hardware +- Advanced error reporting and diagnostics + +## Related Documentation + +- [DPU Gateway Interface Configuration](dpu-gateway-interface.md) +- [DPU Support](dpu-support.md) +- [Gateway Accelerated Interface Configuration](../design/gateway-accelerated-interface-configuration.md) +- [Configuration Guide](../../getting-started/configuration.md) + +## Support + +For issues related to this feature: +1. Check the troubleshooting section in the DPU Gateway Interface Configuration guide +2. Verify SR-IOV hardware and driver support +3. Review error messages and logs +4. Consult the OVN-Kubernetes community for additional support \ No newline at end of file diff --git a/docs/features/hardware-offload/dpu-gateway-interface.md b/docs/features/hardware-offload/dpu-gateway-interface.md new file mode 100644 index 0000000000..49f3e6ccec --- /dev/null +++ b/docs/features/hardware-offload/dpu-gateway-interface.md @@ -0,0 +1,208 @@ +# DPU Gateway Interface Configuration + +## Overview + +In DPU (Data Processing Unit) host mode deployments, OVN-Kubernetes supports automatic gateway interface resolution from PCI address. This feature is particularly useful when the management port is a Virtual Function (VF) and you want to automatically select the corresponding Physical Function (PF) interface as the gateway. 
+ +## Background + +In DPU deployments, the host typically has access to Virtual Functions (VFs) for management purposes, while the Physical Functions (PFs) are used for external connectivity. The "derive-from-mgmt-port" feature allows OVN-Kubernetes to automatically discover and configure the appropriate PF interface as the gateway interface based on the VF used for the management port. + +## How It Works + +When configured with `--gateway-interface=derive-from-mgmt-port`, OVN-Kubernetes performs the following steps: + +1. **Management Port Resolution**: Gets the management port network device name (specified by `--ovnkube-node-mgmt-port-netdev`) +2. **VF PCI Address Retrieval**: Retrieves the PCI address of the management port device (VF) +3. **PF PCI Address Resolution**: Gets the Physical Function (PF) PCI address from the Virtual Function (VF) PCI address +4. **Network Device Discovery**: Retrieves all network devices associated with the PF PCI address +5. **Interface Selection**: Selects the first available network device as the gateway interface + +## Configuration + +### Command Line Options + +```bash +--ovnkube-node-mode=dpu-host +--ovnkube-node-mgmt-port-netdev=pf0vf0 +--gateway-interface=derive-from-mgmt-port +``` + +### Configuration File + +```ini +[OvnKubeNode] +mode=dpu-host +mgmt-port-netdev=pf0vf0 + +[Gateway] +interface=derive-from-mgmt-port +``` + +### Helm Configuration + +```yaml +ovnkube-node: + mode: dpu-host + mgmtPortNetdev: pf0vf0 + +gateway: + interface: derive-from-mgmt-port +``` + +## Example Scenario + +Consider a DPU setup with the following configuration: + +- **Management port device**: `pf0vf0` (Virtual Function) +- **VF PCI address**: `0000:01:02.3` +- **PF PCI address**: `0000:01:00.0` +- **Available PF interfaces**: `eth0`, `eth1` + +With `--gateway-interface=derive-from-mgmt-port`, OVN-Kubernetes will: + +1. Start with the management port device `pf0vf0` +2. Get its PCI address `0000:01:02.3` +3. 
Resolve the PF PCI address to `0000:01:00.0` +4. Find all network devices associated with PF `0000:01:00.0`: `eth0`, `eth1` +5. Select `eth0` (first device) as the gateway interface + +## Requirements + +### Hardware Requirements + +- SR-IOV capable network interface card +- Virtual Function (VF) and Physical Function (PF) setup +- Management port configured as a VF + +### Software Requirements + +- SR-IOV utilities available on the system +- OVN-Kubernetes running in DPU host mode +- Proper VF/PF driver support + +### Configuration Requirements + +- Must be used in DPU host mode (`--ovnkube-node-mode=dpu-host`) +- Management port netdev must be specified (`--ovnkube-node-mgmt-port-netdev`) +- Gateway interface must be set to `derive-from-mgmt-port` + +## Error Handling + +The system will return an error in the following scenarios: + +### No Network Devices Found + +``` +no netdevs found for pci address 0000:01:00.0 +``` + +**Cause**: The PF PCI address doesn't have any associated network devices. + +**Resolution**: Verify that the PF has network interfaces configured and visible to the system. + +### PCI Address Resolution Failure + +``` +failed to get PCI address +``` + +**Cause**: Unable to retrieve the PCI address from the management port device. + +**Resolution**: Ensure the management port device exists and is properly configured. + +### PF PCI Address Resolution Failure + +``` +failed to get PF PCI address +``` + +**Cause**: Unable to resolve the PF PCI address from the VF PCI address. + +**Resolution**: Verify SR-IOV configuration and driver support. + +### Network Device Discovery Failure + +``` +failed to get network devices +``` + +**Cause**: Unable to retrieve network devices associated with the PF PCI address. + +**Resolution**: Check SR-IOV utilities and system configuration. 
+ +## Troubleshooting + +### Verify SR-IOV Configuration + +```bash +# Check if SR-IOV is enabled +lspci | grep -i ethernet + +# Check VF configuration +ip link show + +# Check PF/VF relationship +ls /sys/bus/pci/devices/*/virtfn* +``` + +### Verify Management Port Device + +```bash +# Check if management port device exists +ip link show pf0vf0 + +# Check PCI address +ethtool -i pf0vf0 | grep bus-info +``` + +### Debug PCI Address Resolution + +```bash +# Get VF PCI address +cat /sys/class/net/pf0vf0/device/address + +# Get PF PCI address (if available) +cat /sys/class/net/pf0vf0/device/physfn/address +``` + +## Integration with Existing Features + +### Gateway Accelerated Interface + +The "derive-from-mgmt-port" feature is used in conjunction with management interface to select the appropriate gateway accelerated interface. + +The management port can be specified through one of the following options: +``` + --ovnkube-node-mgmt-port-netdev) + OVNKUBE_NODE_MGMT_PORT_NETDEV=$VALUE +``` + +``` + --ovnkube-node-mgmt-port-dp-resource-name) + OVNKUBE_NODE_MGMT_PORT_DP_RESOURCE_NAME=$VALUE +``` + +OVNKUBE_NODE_MGMT_PORT_DP_RESOURCE_NAME has priority over OVNKUBE_NODE_MGMT_PORT_NETDEV and it is easier to use since it points to a SRIOV Device Plugin pool name. + +### Multiple Network Support + +This feature works with multiple network support and can be used in environments where pods have multiple interfaces connected to different networks. 
+ +## Limitations + +- Only available in DPU host mode +- Requires SR-IOV capable hardware +- Limited to the first available network device from the PF +- Depends on proper VF/PF driver support +- May not work with all SR-IOV implementations + +## Future Enhancements + +Potential improvements to this feature could include: + +- Support for selecting specific network devices based on criteria +- Integration with device plugin resources +- Support for multiple gateway interfaces +- Enhanced error reporting and diagnostics +- Support for non-SR-IOV hardware configurations \ No newline at end of file diff --git a/docs/features/hardware-offload/dpu-support.md b/docs/features/hardware-offload/dpu-support.md index 6c098de727..6ac6a5ca7d 100644 --- a/docs/features/hardware-offload/dpu-support.md +++ b/docs/features/hardware-offload/dpu-support.md @@ -17,3 +17,41 @@ on the embedded CPU. Any vendor that manufactures a DPU which supports the above model should work with current design. Design document can be found [here](https://docs.google.com/document/d/11IoMKiohK7hIyIE36FJmwJv46DEBx52a4fqvrpCBBcg/edit?usp=sharing). + +## OVN Kubernetes in a DPU-Accelerated Environment + +The **ovn-kubernetes** deployment will have two parts: one on the host and another on the DPU side. + + +These aforementioned parts are also expected to be deployed on two different Kubernetes clusters, one for the host and another for the DPUs. + + +### Host Cluster +--- + +#### OVN Kubernetes control plane related component +- ovn-cluster-manager + +#### OVN Kubernetes components on a Standard Host (Non-DPU) +- local-nb-ovsdb +- local-sb-ovsdb +- run-ovn-northd +- ovnkube-controller-with-node +- ovn-controller +- ovs-metrics + +#### OVN Kubernetes component on a DPU-Enabled Host +- ovn-node + +For detailed configuration of gateway interfaces in DPU host mode, see [DPU Gateway Interface Configuration](dpu-gateway-interface.md). 
+ +### DPU Cluster +--- + +#### OVN Kubernetes components +- local-nb-ovsdb +- local-sb-ovsdb +- run-ovn-northd +- ovnkube-controller-with-node +- ovn-controller +- ovs-metrics diff --git a/docs/features/network-qos-guide.md b/docs/features/network-qos-guide.md new file mode 100644 index 0000000000..2e07a7bc31 --- /dev/null +++ b/docs/features/network-qos-guide.md @@ -0,0 +1,329 @@ +# Guide to Using Network QoS + +## Contents + +1. [Overview](#1-overview) +2. [Create a Secondary Network (NAD)](#2-create-a-secondary-network) +3. [Define a NetworkQoS Policy](#3-define-a-networkqos-policy) +4. [Create Sample Pods and Verify the Configuration](#4-create-sample-pods-and-verify-the-configuration) +5. [Explain the NetworkQoS Object](#5-explain-the-networkqos-object) + +## **1 Overview** + +Differentiated Services Code Point (DSCP) marking and egress bandwidth metering let you prioritize or police specific traffic flows. The new **NetworkQoS** Custom Resource Definition (CRD) in [ovn-kubernetes](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/dist/templates/k8s.ovn.org_networkqoses.yaml.j2) makes both features available to Kubernetes users on **all** pod interfaces—primary or secondary—without touching pod manifests. + +This guide provides a step-by-step example of how to use this feature. Before you begin, ensure that you have a Kubernetes cluster configured with the ovn-kubernetes CNI. Since the examples use network attachments, you must run the cluster with multiple network support enabled. 
In a kind cluster, you would use the following flags: + +```bash +cd contrib +./kind-helm.sh -nqe -mne ; # --enable-network-qos --enable-multi-network +``` + +## **2 Create a Secondary Network** + +File: nad.yaml + +```yaml +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: ovn-stream + namespace: default + labels: # label needed for NetworkQoS selector + nad-type: ovn-kubernetes-nqos +spec: + config: |2 + { + "cniVersion": "1.0.0", + "name": "ovn-stream", + "type": "ovn-k8s-cni-overlay", + "topology": "layer3", + "subnets": "10.245.0.0/16/24", + "mtu": 1300, + "master": "eth1", + "netAttachDefName": "default/ovn-stream" + } +``` +*Why the label?* `NetworkQoS` uses a label selector to find matching NADs. Without at least one label, the selector cannot match. + +## **3 Define a NetworkQoS Policy** + +File: nqos.yaml + +```yaml +apiVersion: k8s.ovn.org/v1alpha1 +kind: NetworkQoS +metadata: + name: qos-external + namespace: default +spec: + networkSelectors: + - networkSelectionType: NetworkAttachmentDefinitions + networkAttachmentDefinitionSelector: + namespaceSelector: {} # any namespace + networkSelector: + matchLabels: + nad-type: ovn-kubernetes-nqos + podSelector: + matchLabels: + nqos-app: bw-limited + priority: 10 # higher value wins in a tie-break + egress: + - dscp: 20 + bandwidth: + burst: 100 # kilobits + rate: 20000 # kbps + classifier: + to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.11.12.13/32 + - 172.16.0.0/12 + - 192.168.0.0/16 +``` +A full CRD template lives [here](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/master/dist/templates/k8s.ovn.org_networkqoses.yaml.j2). + +The `egress` field is a list, allowing you to define multiple markings and bandwidth limits based on different classifiers. + +Note that this configuration will apply to the NAD of pods based on the network selector, and only on pods that have the label `nqos-app: bw-limited`. 
+ +```bash +$ kubectl create -f nad.yaml && \ + kubectl create -f nqos.yaml + +networkattachmentdefinition.k8s.cni.cncf.io/ovn-stream created +networkqos.k8s.ovn.org/qos-external created +``` +At this point, the output from `kubectl get networkqoses` will look like this: + +```bash +$ kubectl api-resources -owide | head -1 ; \ + kubectl api-resources -owide | grep NetworkQoS +NAME SHORTNAMES APIVERSION NAMESPACED KIND VERBS CATEGORIES +networkqoses k8s.ovn.org/v1alpha1 true NetworkQoS delete,deletecollection,get,list,patch,create,update,watch + +$ kubectl get networkqoses qos-external -n default -owide +NAME STATUS +qos-external NetworkQoS Destinations applied +``` + +## **4 Create Sample Pods and Verify the Configuration** + +### **4.1 Launch Test Pods** + +To test this, let's create a pod using a helper function that allows us to add labels to it. + +File: create_pod.source + +```bash +create_pod() { + local pod_name=${1:-pod0} + local node_name=${2:-ovn-worker} + local extra_labels=${3:-} + + NAMESPACE=$(kubectl config view --minify --output 'jsonpath={..namespace}') + NAMESPACE=${NAMESPACE:-default} + + if ! kubectl get pod "$pod_name" -n "$NAMESPACE" &>/dev/null; then + echo "Creating pod $pod_name in namespace $NAMESPACE..." 
+ + # Prepare labels block + labels_block=" name: $pod_name" + if [[ -n "$extra_labels" ]]; then + # Convert JSON string to YAML-compatible lines + while IFS="=" read -r k v; do + labels_block+=" + $k: $v" + done < <(echo "$extra_labels" | jq -r 'to_entries|map("\(.key)=\(.value)")|.[]') + fi + + # Generate the manifest + cat </dev/null 2>&1 & +# pod1 to pod2 +nohup kubectl exec -i pod1 -- ping -c 3600 -q $DST_IP_POD2 >/dev/null 2>&1 & + +sudo dnf install -y --quiet tcpdump ; # Install tcpdump, if needed + +IPNS=$(docker inspect --format '{{ '{{' }} .State.Pid }}' ovn-worker) +sudo nsenter -t ${IPNS} -n tcpdump -envvi eth0 geneve +``` + +``` +tcpdump: listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes + +**Pod0 to Pod2**: Notice that since pod0 does not have the label to match against NetworkQoS, its TOS is 0. However, pod2's response is DSCP marked (tos 0x50), since pod2 matches the NetworkQoS criteria with the label `nqos-app: bw-limited`. + +12:46:30.755551 02:42:ac:12:00:06 > 02:42:ac:12:00:05, ethertype IPv4 (0x0800), length 156: (tos 0x0, ttl 64, id 26896, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.6.38210 > 172.18.0.5.geneve: [bad udp cksum 0x58bb -> 0xc87d!] Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 00090006] + 0a:58:0a:f5:02:01 > 0a:58:0a:f5:02:03, ethertype IPv4 (0x0800), length 98: (tos 0x0, ttl 63, id 61037, offset 0, flags [DF], proto ICMP (1), length 84) + 10.245.4.4 > 10.245.2.3: ICMP echo request, id 14, seq 44, length 64 + +— + +12:46:30.755694 02:42:ac:12:00:05 > 02:42:ac:12:00:06, ethertype IPv4 (0x0800), length 156: (tos 0x50, ttl 64, id 46220, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.5.38210 > 172.18.0.6.geneve: [bad udp cksum 0x58bb -> 0xc47d!] 
Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 0004000a] + 0a:58:0a:f5:04:01 > 0a:58:0a:f5:04:04, ethertype IPv4 (0x0800), length 98: (tos 0x50, ttl 63, id 45002, offset 0, flags [none], proto ICMP (1), length 84) + 10.245.2.3 > 10.245.4.4: ICMP echo reply, id 14, seq 44, length 64 + +—--------- + +**Pod1 to Pod2**: Traffic is marked both ways (both pods have the matching label) + +12:46:30.497289 02:42:ac:12:00:06 > 02:42:ac:12:00:05, ethertype IPv4 (0x0800), length 156: (tos 0x50, ttl 64, id 26752, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.6.7856 > 172.18.0.5.geneve: [bad udp cksum 0x58bb -> 0x3f10!] Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 00090006] + 0a:58:0a:f5:02:01 > 0a:58:0a:f5:02:03, ethertype IPv4 (0x0800), length 98: (tos 0x50, ttl 63, id 21760, offset 0, flags [DF], proto ICMP (1), length 84) + 10.245.4.3 > 10.245.2.3: ICMP echo request, id 14, seq 56, length 64 + +— + +12:46:30.497381 02:42:ac:12:00:05 > 02:42:ac:12:00:06, ethertype IPv4 (0x0800), length 156: (tos 0x50, ttl 64, id 46019, offset 0, flags [DF], proto UDP (17), length 142) + 172.18.0.5.7856 > 172.18.0.6.geneve: [bad udp cksum 0x58bb -> 0x3b11!] Geneve, Flags [C], vni 0x12, proto TEB (0x6558), options [class Open Virtual Networking (OVN) (0x102) type 0x80(C) len 8 data 0004000a] + 0a:58:0a:f5:04:01 > 0a:58:0a:f5:04:03, ethertype IPv4 (0x0800), length 98: (tos 0x50, ttl 63, id 3850, offset 0, flags [none], proto ICMP (1), length 84) + 10.245.2.3 > 10.245.4.3: ICMP echo reply, id 14, seq 56, length 64 +``` + +## **5 Explain the NetworkQoS Object** + +Below is an *abbreviated* map of the CRD schema returned by `kubectl explain networkqos --recursive` (v1alpha1). Use this as a quick reference. 
For the definitive specification, always consult the `kubectl explain` output or the CRD YAML in the ovn-kubernetes repository. + +### **5.1 Top‑level `spec` keys** + +| Field | Type | Required | Purpose | +| ----- | ----- | ----- | ----- | +| **podSelector** | `LabelSelector` | No | Selects pods whose traffic will be evaluated by the QoS rules. If empty, all pods in the namespace are selected. | +| **networkSelectors[]** | list `NetworkSelector` | No | Restricts the rule to traffic on specific networks. If absent, the rule matches any interface. *(See §5.2)* | +| **priority** | `int` | **Yes** | Higher number → chosen first when multiple `NetworkQoS` objects match the same packet. | +| **egress[]** | list `EgressRule` | **Yes** | One or more marking / policing rules. Evaluated in the order listed. *(See §5.3)* | + +Note the square-bracket notation (`[]`) for **both** `egress` and `networkSelectors`—each is an array in the CRD. + +--- + +### **5.2 Inside a `networkSelectors[]` entry** + +Each list element tells the controller **where** the pods' egress traffic must flow in order to apply the rule. Exactly **one** selector type must be set. + +| Key | Required | Description | +| :---- | :---- | :---- | +| `networkSelectionType` | **Yes** | Enum that declares which selector below is populated. Common values: `NetworkAttachmentDefinitions`, `DefaultNetwork`, `SecondaryUserDefinedNetworks`, … | +| `networkAttachmentDefinitionSelector` | conditional | When `networkSelectionType=NetworkAttachmentDefinitions`. Selects NADs by **namespaceSelector** (required) *and* **networkSelector** (required). Both are ordinary `LabelSelectors`. | +| `secondaryUserDefinedNetworkSelector` | conditional | Used when `networkSelectionType=SecondaryUserDefinedNetworks`. Similar structure: required **namespaceSelector** & **networkSelector**. 
| +| `clusterUserDefinedNetworkSelector`, `primaryUserDefinedNetworkSelector` | conditional | Additional selector styles, each with required sub‑selectors as per the CRD. | + +**Typical usage** – `networkSelectionType: NetworkAttachmentDefinitions` + `networkAttachmentDefinitionSelector`. + +--- + +### **5.3 Inside an `egress[]` rule** + +| Field | Type | Required | Description | +| :---- | :---- | :---- | :---- | +| `dscp` | `int` (0 – 63) | **Yes** | DSCP value to stamp on the **inner** IP header. This value determines the traffic priority. | +| `bandwidth.rate` | `int` (kbps) | No | Sustained rate for the token-bucket policer (in kilobits per second). | +| `bandwidth.burst` | `int` (kilobits) | No | Maximum burst size that can accrue (in kilobits). | +| `classifier.to` / `classifier.from` | list `TrafficSelector` | No | CIDRs the packet destination (or source) must match. Each entry is an `ipBlock` supporting an `except` list. | +| `classifier.ports[]` | list | No | List of `{protocol, port}` tuples the packet must match; protocol is `TCP`, `UDP`, or `SCTP`. | + +If **all** specified classifier conditions match, the packet gets the DSCP mark and/or bandwidth policer defined above. This allows for fine-grained control over which traffic flows receive QoS treatment. 
diff --git a/docs/images/ovn-inside-k8s-stacked.png b/docs/images/ovn-inside-k8s-stacked.png index ea2c8937cb..2fc5b0770f 100644 Binary files a/docs/images/ovn-inside-k8s-stacked.png and b/docs/images/ovn-inside-k8s-stacked.png differ diff --git a/docs/images/ovn-inside-k8s.png b/docs/images/ovn-inside-k8s.png index 91d9981986..3393d0af27 100644 Binary files a/docs/images/ovn-inside-k8s.png and b/docs/images/ovn-inside-k8s.png differ diff --git a/docs/installation/launching-ovn-kubernetes-on-kind.md b/docs/installation/launching-ovn-kubernetes-on-kind.md index c3a49ddde7..1a6ae11a15 100644 --- a/docs/installation/launching-ovn-kubernetes-on-kind.md +++ b/docs/installation/launching-ovn-kubernetes-on-kind.md @@ -14,19 +14,19 @@ KIND (Kubernetes in Docker) deployment of OVN kubernetes is a fast and easy mean sudo firewall-cmd --permanent --add-port=11337/tcp; sudo firewall-cmd --reload ``` - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Python and pip +- Python 3 and [pipx](https://pipx.pypa.io/stable/installation/) - jq - openssl - openvswitch - -**NOTE :** In certain operating systems such as CentOS 8.x, pip2 and pip3 binaries are installed instead of pip. In such situations create a softlink for "pip" that points to "pip2". +- Go 1.23.0 or above +- For podman users: skopeo For OVN kubernetes KIND deployment, use the `kind.sh` script. First Download and build the OVN-Kubernetes repo: -``` -git clone github.com/ovn-org/ovn-kubernetes; +```shell +git clone https://github.com/ovn-kubernetes/ovn-kubernetes.git cd ovn-kubernetes ``` The `kind.sh` script builds OVN-Kubernetes into a container image. 
To verify @@ -79,13 +79,16 @@ To deploy KIND however, you need to start it as root and then copy root's kube c ``` $ pushd contrib $ sudo ./kind.sh -ep podman +$ mkdir -p ~/.kube $ sudo cp /root/ovn.conf ~/.kube/kind-config $ sudo chown $(id -u):$(id -g) ~/.kube/kind-config $ export KUBECONFIG=~/.kube/kind-config $ popd ``` -This will launch a KIND deployment. By default the cluster is named `ovn`. +**NOTE:** If you installed go via the official path on Linux and have encountered the "go: command not found" issue, you can preserve your environment when doing sudo: `sudo --preserve-env=PATH ./kind.sh -ep podman` + +This will launch a KIND deployment. By default, the cluster is named `ovn`. ``` $ kubectl get nodes diff --git a/docs/installation/ovn_k8s.conf.5 b/docs/installation/ovn_k8s.conf.5 index fc790db071..10f224b831 100644 --- a/docs/installation/ovn_k8s.conf.5 +++ b/docs/installation/ovn_k8s.conf.5 @@ -124,6 +124,16 @@ or set to "shared" (share a network interface) or "local" (use a NAT-ed virtual This interface will be used as the gateway interface in "shared" mode. If not specified the interface with the default route will be used. .TP +\fBinterface\fR=derive-from-mgmt-port +In DPU host mode, automatically resolve the gateway interface from PCI address. +This performs the following steps: +1. Get the management port network device name +2. Retrieve the PCI address of the management port device +3. Get the Physical Function (PF) PCI address from the Virtual Function (VF) PCI address +4. Retrieve all network devices associated with the PF PCI address +5. Select the first available network device as the gateway interface +This option requires SR-IOV capable hardware and must be used with DPU host mode. +.TP \fBnext-hop\fR=1.2.3.4 This is the gateway IP address of \fBinterface\fR to which traffic exiting the OVN logical network should be sent in "shared" mode. 
If not specified diff --git a/docs/observability/ovn-observability.md b/docs/observability/ovn-observability.md index 3dab5f39fb..5810fea58a 100644 --- a/docs/observability/ovn-observability.md +++ b/docs/observability/ovn-observability.md @@ -68,9 +68,11 @@ No API changes were done. ### OVN sampling details OVN has 3 main db tables that are used for sampling: -- `Sample_collector`: This table is used to define the sampling collector. It defines the sampling rate and collectorID, -which is used to set up collectors in the OVS. +- `Sample_collector`: This table is used to define the sampling collector. It defines the sampling rate via `Probability` field +and collectorID via `SetID` field, which is used to set up collectors in the OVS. - `Sampling_app`: This table is used to set `ID`s for existing OVN sampling applications, that are sent together with the samples. +There is a supported set of `Sampling_app` types, for example `acl-new` app is used to sample new connections matched by an ACL. +`Sampling_app.ID` is a way to identify the application that generated the sample. - `Sample`: This table is used to define required samples and point to the collectors. Every sample has `Metadata` that is sent together with the sample. @@ -84,15 +86,21 @@ that is decoded by `go-controller/observability-lib`. When one of the supported objects (for example, network policy) is created, ovn-kuberentes generates an nbdb `Sample` for it. To decode the samples into human-readable information, `go-controller/observability-lib` is used. It finds `Sample` -by the attached `Sample.Metadata` and then gets corresponding db object based on `Sampling_add.ID` and `Sample.UUID`. -The message is then constructed using db object `external_ids`. - -### Full stack architecture +by the attached `Sample.Metadata` and then gets corresponding db object (e.g. ACL) based on `Sampling_app.ID` and `Sample.UUID`. +The message is then constructed using db object (e.g. ACL) `external_ids`. 
![ovnkube-observ](../images/ovnkube-observ.png) The diagram shows how all involved components (kernel, OVS, OVN, ovn-kubernetes) are connected. +#### Enabling collectors + +Currently, we have only 1 default collector with hard-coded ID, which is set via the `Sample_collector.SetID` field. +To make OVS start sending samples for an existing `Sample_collector`, a new OVSDB `Flow_Sample_Collector_Set` entry +needs to be created with `Flow_Sample_Collector_Set.ID` value of `Sample_collector.SetID`. +This is done by the `go-controller/observability-lib` and it is important to note that only one `Flow_Sample_Collector_Set` +should be created for a given `Sample_collector.SetID` value at a time. But if such entry already exists, it can be reused. + ## Best Practices TDB @@ -126,6 +134,9 @@ This applies to in both cases ANP will have only first-packet sample. +Use caution when running the `ovnkube-observe` tool. Currently it has poor resource management and consumes a lot of +CPU when many packets are sent. Tracked here https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5203 + ## References NONE diff --git a/docs/okeps/okep-5233-preconfigured-udn-addresses.md b/docs/okeps/okep-5233-preconfigured-udn-addresses.md new file mode 100644 index 0000000000..332fec24fb --- /dev/null +++ b/docs/okeps/okep-5233-preconfigured-udn-addresses.md @@ -0,0 +1,514 @@ +# OKEP-5233: Predefined addresses for primary user defined networks workloads + +* Issue: [#5233](https://github.com/ovn-org/ovn-kubernetes/issues/5233) + +## Problem Statement + +Migrating legacy workloads with predefined network configurations (IP, MAC, default gateway) +to OVN-Kubernetes is currently not possible. There is a need to import these workloads, preserving +their network configuration, while also enabling non-NATed traffic to better integrate with +existing infrastructures. 
+ +## Goals + +* Enable pods on primary Layer2 User Defined Network (UDN) and Cluster UDN to use a predefined static network + configuration including IP address, MAC address, and default gateway. +* Ensure it is possible to enable non-NATed traffic for pods with predefined static network configuration + by exposing the Layer2 Cluster UDN through BGP (see [Risks, Known Limitations and Mitigations](#risks-known-limitations-and-mitigations) for current BGP support limitations). + +## Non-Goals + +* Modifying the default gateway and management IPs of a primary UDN after it was created. +* Modifying a pod's network configuration after the pod was created. +* Non-NATed traffic support in secondary networks. +* Predefined IP/MAC addresses support for pods in Layer3 UDNs. +* Configurable default gateway and infrastructure addresses in Layer3 UDNs. +* Predefined IP/MAC addresses support for pods in Localnet UDNs. +* Configuring default gateway and infrastructure addresses in Layer2 (Cluster) UDNs that do not belong to the networks subnets. +* No-downtime workload migration. + +## Introduction + +Legacy workloads, particularly virtual machines, are often set up with static +network configurations. When migrating to OVN-Kubernetes UDNs, +it should be possible to integrate these gradually to prevent disruptions. + +Currently, OVN-Kubernetes allocates IP addresses dynamically and it generates the MAC +addresses from it. It sets the pod's default gateway to the first usable IP address of its subnet. +For primary UDNs, it additionally reserves the second usable IP address for the internal management port which +excludes it from being available for workloads. + +## User-Stories/Use-Cases + +* As a user, I want to define a custom default gateway IP for a new primary Layer2 UDN +so that my migrated workloads can maintain their existing network configuration without disruption. 
+ +* As a user, I want the ability to configure a new primary Layer2 UDN with a custom management IP +address to prevent IP conflicts with the workloads I am importing. + +* As a user, I want to assign a predefined IP address and MAC address to a pod to ensure the +network identity of my imported workload is maintained. + +* As a user, I want to prevent OVN-Kubernetes from automatically assigning IP addresses that are +already in use by my existing infrastructure, so that I can migrate my services gradually without network conflicts. + +## Proposed Solution + +### Primary UDN configuration + +To support the migration of pre-configured workloads, the UDN and cluster UDN API has to +be enhanced. The aim is to provide control over the IP addresses that OVN-Kubernetes +consumes in the overlay network, this includes the default gateway and management IPs. +The proposed changes are specified in the [Layer2 User Defined Network API changes](#layer2-user-defined-network-api-changes) section. + +### Pod network identity + +OVN-Kubernetes currently supports configuring pods' secondary network interfaces through +the `k8s.v1.cni.cncf.io/networks` annotation, which contains a JSON array of +[NetworkSelectionElement](https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/blob/e12bd55d48a1f798a1720218819063f5903b72e3/pkg/apis/k8s.cni.cncf.io/v1/types.go#L136-L171) +objects. Additionally, it is possible to modify the cluster's default network attachment by +setting the `v1.multus-cni.io/default-network` annotation to a singular NetworkSelectionElement +object. + +To enable using predefined MAC and IP addresses on pods attached to a primary UDN, +the `v1.multus-cni.io/default-network` will be reused, as it is a well-known annotation for +configuring the pod's default network. The `k8s.v1.cni.cncf.io/networks` annotation is specific to +secondary networks and expects a list of networks, which does not fit well with primary UDNs. 
+With the proposed approach, the `k8s.ovn.org/primary-udn-ipamclaim` annotation, used to link a +pod with a matching claim, will be deprecated in favor of the `IPAMClaimReference` field in the +NetworkSelectionElement. When `IPAMClaimReference` is specified we will update its status to reflect +the result of the IP allocation, see [IPAMClaim API changes](#ipamclaim-api-changes). +OVN-Kubernetes will keep track of all allocated MAC and IP addresses to detect conflicts. +When a conflict is detected, OVN-Kubernetes will emit a Kubernetes event to the pod indicating +the specific conflict (IP or MAC address already in use) and prevent the pod from starting. + +```mermaid +%%{init: { 'sequence': {'messageAlign': 'left'} }}%% +sequenceDiagram +actor User +participant K8s_API_Server as "K8s API Server" +participant OVN_K_Controller as "OVN-Kubernetes" + +note over User, K8s_API_Server: Pre-Step: User defines UDN
(Optional) with custom Default Gateway / Management IP + +User->>K8s_API_Server: Create Pod with annotation:
'v1.multus-cni.io/default-network':
{
name: 'default',
namespace: 'ovn-kubernetes',
ips: ['10.0.0.10'],
mac: '00:1A:2B:3C:4D:5E',
ipam-claim-reference: 'my-claim'
} + + +K8s_API_Server->>OVN_K_Controller: Notify: New Pod Spec + +OVN_K_Controller->>OVN_K_Controller: Parse 'v1.multus-cni.io/default-network' annotation
Perform IP/MAC conflict check within UDN
(Verify requested IP/MAC are not in use) + + +alt "No IP/MAC Conflict" +opt "IPAMClaimReference is specified in annotation" +OVN_K_Controller->>K8s_API_Server: Update Status conditions and addresses of referenced IPAMClaim +end + +OVN_K_Controller->>OVN_K_Controller: Configure Pod's Primary Network Interface +note right of OVN_K_Controller: Pod provisioning succeeds +else "IP/MAC Conflict Detected in UDN" +opt "IPAMClaimReference is specified in annotation" +OVN_K_Controller->>K8s_API_Server: Update Status conditions of referenced IPAMClaim +end +OVN_K_Controller->>K8s_API_Server: Emit IP/MAC Conflict error event to the Pod +note right of OVN_K_Controller: Pod provisioning fails +end +``` + +### API Details + +#### Layer2 User Defined Network API changes + +Proposed API change adds `infrastructureSubnets` `reservedSubnets` and `defaultGatewayIPs` fields to the `Layer2Config` which is a part of both +the [UDN](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/a3d0a2b238bef9b1399b3342228d75504afed18b/go-controller/pkg/crd/userdefinednetwork/v1/udn.go#L47) +and [cluster UDN](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/a3d0a2b238bef9b1399b3342228d75504afed18b/go-controller/pkg/crd/userdefinednetwork/v1/cudn.go#L63) specs: + +```diff +// +kubebuilder:validation:XValidation:rule="has(self.ipam) && has(self.ipam.mode) && self.ipam.mode != 'Enabled' || has(self.subnets)", message="Subnets is required with ipam.mode is Enabled or unset" +// +kubebuilder:validation:XValidation:rule="!has(self.ipam) || !has(self.ipam.mode) || self.ipam.mode != 'Disabled' || !has(self.subnets)", message="Subnets must be unset when ipam.mode is Disabled" +// +kubebuilder:validation:XValidation:rule="!has(self.ipam) || !has(self.ipam.mode) || self.ipam.mode != 'Disabled' || self.role == 'Secondary'", message="Disabled ipam.mode is only supported for Secondary network" +// +kubebuilder:validation:XValidation:rule="!has(self.joinSubnets) || has(self.role) && self.role == 'Primary'", 
message="JoinSubnets is only supported for Primary network" +// +kubebuilder:validation:XValidation:rule="!has(self.subnets) || !has(self.mtu) || !self.subnets.exists_one(i, isCIDR(i) && cidr(i).ip().family() == 6) || self.mtu >= 1280", message="MTU should be greater than or equal to 1280 when IPv6 subnet is used" ++ // +kubebuilder:validation:XValidation:rule="!has(self.defaultGatewayIPs) || has(self.role) && self.role == 'Primary'", message="defaultGatewayIPs is only supported for Primary network" ++ // +kubebuilder:validation:XValidation:rule="!has(self.defaultGatewayIPs) || self.defaultGatewayIPs.all(ip, self.subnets.exists(subnet, cidr(subnet).containsIP(ip)))", message="defaultGatewayIPs must belong to one of the subnets specified in the subnets field" ++ // +kubebuilder:validation:XValidation:rule="!has(self.reservedSubnets) || has(self.reservedSubnets) && has(self.subnets)", message="reservedSubnets must be unset when subnets is unset" ++ // +kubebuilder:validation:XValidation:rule="!has(self.reservedSubnets) || self.reservedSubnets.all(e, self.subnets.exists(s, cidr(s).containsCIDR(cidr(e))))",message="reservedSubnets must be subnetworks of the networks specified in the subnets field",fieldPath=".reservedSubnets" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || has(self.infrastructureSubnets) && has(self.subnets)", message="infrastructureSubnets must be unset when subnets is unset" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || self.infrastructureSubnets.all(e, self.subnets.exists(s, cidr(s).containsCIDR(cidr(e))))",message="infrastructureSubnets must be subnetworks of the networks specified in the subnets field",fieldPath=".infrastructureSubnets" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || !has(self.defaultGatewayIPs) || self.defaultGatewayIPs.all(ip, self.infrastructureSubnets.exists(subnet, cidr(subnet).containsIP(ip)))", 
message="defaultGatewayIPs have to belong to infrastructureSubnets" ++ // +kubebuilder:validation:XValidation:rule="!has(self.infrastructureSubnets) || !has(self.reservedSubnets) || self.infrastructureSubnets.all(infra, !self.reservedSubnets.exists(reserved, cidr(infra).containsCIDR(reserved) || cidr(reserved).containsCIDR(infra)))", message="infrastructureSubnets and reservedSubnets must not overlap" +type Layer2Config struct { + +// Role describes the network role in the pod. +// +// Allowed value is "Secondary". +// Secondary network is only assigned to pods that use `k8s.v1.cni.cncf.io/networks` annotation to select given network. +// +// +kubebuilder:validation:Enum=Primary;Secondary +// +kubebuilder:validation:Required +// +required +Role NetworkRole `json:"role"` + +// MTU is the maximum transmission unit for a network. +// MTU is optional, if not provided, the globally configured value in OVN-Kubernetes (defaults to 1400) is used for the network. +// +// +kubebuilder:validation:Minimum=576 +// +kubebuilder:validation:Maximum=65536 +// +optional +MTU int32 `json:"mtu,omitempty"` + +// Subnets are used for the pod network across the cluster. +// Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. +// +// The format should match standard CIDR notation (for example, "10.128.0.0/16"). +// This field must be omitted if `ipam.mode` is `Disabled`. +// +// +optional +Subnets DualStackCIDRs `json:"subnets,omitempty"` + ++ // reservedSubnets specifies a list of CIDRs reserved for static IP assignment, excluded from automatic allocation. ++ // reservedSubnets is optional. When omitted, all IP addresses in `subnets` are available for automatic assignment. ++ // IPs from these ranges can still be requested through static IP assignment in pod annotations. ++ // Each item should be in range of the specified CIDR(s) in `subnets`. ++ // The maximum number of entries allowed is 25. 
++ // The format should match standard CIDR notation (for example, "10.128.0.0/16"). ++ // This field must be omitted if `subnets` is unset or `ipam.mode` is `Disabled`. ++ // +optional ++ // +kubebuilder:validation:MinItems=1 ++ // +kubebuilder:validation:MaxItems=25 ++ ReservedSubnets []CIDR `json:"reservedSubnets,omitempty"` + +// JoinSubnets are used inside the OVN network topology. +// +// Dual-stack clusters may set 2 subnets (one for each IP family), otherwise only 1 subnet is allowed. +// This field is only allowed for "Primary" network. +// It is not recommended to set this field without explicit need and understanding of the OVN network topology. +// When omitted, the platform will choose a reasonable default which is subject to change over time. +// +// +optional +JoinSubnets DualStackCIDRs `json:"joinSubnets,omitempty"` + ++ // infrastructureSubnets specifies a list of internal CIDR ranges that OVN-Kubernetes will reserve for internal network infrastructure. ++ // Any IP addresses within these ranges cannot be assigned to workloads. ++ // When omitted, OVN-Kubernetes will automatically allocate IP addresses from `subnets` for its infrastructure needs. ++ // When `reservedSubnets` is also specified the CIDRs cannot overlap. ++ // When `defaultGatewayIPs` is also specified the default gateway IPs must belong to one of the CIDRs. ++ // Each item should be in range of the specified CIDR(s) in `subnets`. ++ // The maximum number of entries allowed is 10. ++ // The format should match standard CIDR notation (for example, "10.128.0.0/16"). ++ // This field must be omitted if `subnets` is unset or `ipam.mode` is `Disabled`. ++ // +optional ++ // +kubebuilder:validation:MinItems=1 ++ // +kubebuilder:validation:MaxItems=10 ++ InfrastructureSubnets []CIDR `json:"infrastructureSubnets,omitempty"` + ++ // defaultGatewayIPs specifies the default gateway IP used in the internal OVN topology. 
++ // ++ // Dual-stack clusters may set 2 IPs (one for each IP family), otherwise only 1 IP is allowed. ++ // This field is only allowed for "Primary" network. ++ // It is not recommended to set this field without explicit need and understanding of the OVN network topology. ++ // When omitted, an IP from network subnet is used. ++ // ++ // +optional ++ DefaultGatewayIPs DualStackIPs `json:"defaultGatewayIPs,omitempty"` + +// IPAM section contains IPAM-related configuration for the network. +// +optional +IPAM *IPAMConfig `json:"ipam,omitempty"` +} + +// +kubebuilder:validation:XValidation:rule="isIP(self)", message="IP is invalid" +type IP string + +// +kubebuilder:validation:MinItems=1 +// +kubebuilder:validation:MaxItems=2 +// +kubebuilder:validation:XValidation:rule="size(self) != 2 || !isIP(self[0]) || !isIP(self[1]) || ip(self[0]).family() != ip(self[1]).family()", message="When 2 IPs are set, they must be from different IP families" +type DualStackIPs []IP + +``` + +The API changes mentioned above will be carried to the `NetworkAttachmentDefinition` JSON spec. + +#### IPAMClaim API changes + +The following pull request is tracking the IPAMClaim API change that introduces the status conditions: + + +[IPAMClaim CRD doc](https://docs.google.com/document/d/1OQIJIrCtsYpR5O44w0hpoJ2TyKBz1Du-KhRT4RtrAjk) - `IPAM allocation on behalf of other entities` section + +### Usage Example + +A user migrating services wants to import a workload pod preserving its original IP address. 
+Workload data: + +```yaml +IP: 192.168.100.205 +MAC: 00:1A:2B:3C:4D:5E +Default Gateway: 192.168.100.2 +``` + +```yaml +apiVersion: k8s.ovn.org/v1 +kind: ClusterUserDefinedNetwork +metadata: + name: network-l2 +spec: + topology: "Layer2" + layer2: + role: Primary + subnets: ["192.168.100.0/24"] + infrastructureSubnets: ["192.168.100.0/30"] # used for OVN-Kubernetes infrastructure + reservedSubnets: ["192.168.100.200/29"] # reserved for workloads that will require predefined addresses + defaultGatewayIPs: ["192.168.100.2"] +``` + +With this configuration, OVN-Kubernetes automatically assigns IPs from `.4-.199` and `.208-.254` for new workloads, while pods can request specific IPs from the reserved range: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: migrated-app + annotations: + v1.multus-cni.io/default-network: | + {"name": "default", "namespace": "ovn-kubernetes", "ips": ["192.168.100.205"], "mac": "00:1A:2B:3C:4D:5E", "ipam-claim-reference": "my-claim"} +spec: +``` + +### Implementation Details + +#### Configurability + +The changes outlined in this enhancement should be configurable. This means a configuration knob +is required to instruct OVN-Kubernetes on whether to process the annotation described in the +[Pod network identity](#pod-network-identity) section. The feature knob will be called `preconfigured-udn-addresses-enable`. + +#### NetworkSelectionElement annotation + +Currently, the `v1.multus-cni.io/default-network` annotation is only processed for the cluster default network. +This enhancement will extend this behavior, allowing it to be applied to pods created in the primary Layer2 UDN as well. 
+The annotation should only be processed for new pods, modifying it after the addresses were allocated won't +be reflected in the pods network configuration and this should be blocked through a +[Validating Admission Policy](https://kubernetes.io/docs/reference/access-authn-authz/validating-admission-policy/): + +```yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: predefined-network-addresses +spec: + matchConstraints: + resourceRules: + - apiGroups: [""] + apiVersions: ["v1"] + operations: ["UPDATE"] + resources: ["pods"] + failurePolicy: Fail + validations: + - expression: "('v1.multus-cni.io/default-network' in oldObject.metadata.annotations) == ('v1.multus-cni.io/default-network' in object.metadata.annotations)" + message: "The 'v1.multus-cni.io/default-network' annotation cannot be changed after the pod was created" +``` + +The `NetworkSelectionElement` structure has an extensive list of fields, this enhancement +focuses only on the following: + +```cgo +type NetworkSelectionElement struct { + // Name contains the name of the Network object this element selects + Name string `json:"name"` + // Namespace contains the optional namespace that the network referenced + // by Name exists in + Namespace string `json:"namespace,omitempty"` + // IPRequest contains an optional requested IP addresses for this network + // attachment + IPRequest []string `json:"ips,omitempty"` + // MacRequest contains an optional requested MAC address for this + // network attachment + MacRequest string `json:"mac,omitempty"` + // IPAMClaimReference container the IPAMClaim name where the IPs for this + // attachment will be located. + IPAMClaimReference string `json:"ipam-claim-reference,omitempty"` +} +``` + +Any other field set in the struct will be ignored by OVN-Kubernetes. + +When using the `v1.multus-cni.io/default-network` annotation, Multus strictly requires its value to reference an +existing NAD. 
Multus then builds the CNI requests based on it. +This proposal introduces a static default NAD object applied to the cluster. This object will serve as a +stub to generate the CNI calls, preserving the current behavior: + +```yaml +apiVersion: k8s.cni.cncf.io/v1 +kind: NetworkAttachmentDefinition +metadata: + name: default + namespace: ovn-kubernetes +spec: + config: '{"cniVersion": "0.4.0", "name": "ovn-kubernetes", "type": "ovn-k8s-cni-overlay"}' +``` + +With this approach, users must configure the `Name` to `default` and the `Namespace` to `ovn-kubernetes`. +This configuration ensures Multus still references the default network while OVN-Kubernetes will internally use the +primary UDN to handle MAC/IP requests from the NSE. + +> The default NAD object specified above is already used when the default network is exposed through BGP as +part of the route advertisement feature. The proposal is to have it available all the time. + +With `k8s.ovn.org/primary-udn-ipamclaim` being deprecated in favor of the `IPAMClaimReference` field +in the `NetworkSelectionElement` we have to define the expected behavior. To avoid conflicting +settings when `v1.multus-cni.io/default-network` is set the `k8s.ovn.org/primary-udn-ipamclaim` is +going to be ignored, it will be reflected in the opposite scenario for backwards compatibility +with a plan to remove it in a future release. +Deprecation plan for the `k8s.ovn.org/primary-udn-ipamclaim` annotation: + +* release-N - emit a warning event stating that the annotation is deprecated and will be removed in a future release. +* release-N+1 - fail to configure pods with the annotation set. +* release-N+2 - remove any code handling the annotation, effectively ignoring it. + +Note that `GatewayRequest` is not listed, the default gateway is an attribute of the network is not going to be +configurable per pod. 
+ +### Address allocation + +OVN-Kubernetes currently [generates](https://github.com/ovn-kubernetes/ovn-kubernetes/blob/3ef29b9a32b04b7917a0afd6b0e9651d17242ed7/go-controller/pkg/util/net.go#L100-L113) +the overlay MAC addresses from the IPs: + +* IPv4: It takes the four octets of the address (e.g `AA.BB.CC.DD`) and uses them to +create the MAC address with a constant prefix (e.g. `0A:58:AA:BB:CC:DD`). +* IPv6: Computes a SHA256 checksum from the IPv6 string and uses the first four bytes for the MAC +address with the `0A:58` constant prefix(e.g. `0A:58:SHA[0]:SHA[1]:SHA[2]:SHA[3]`). + +Although unlikely, we need to implement logic that ensures that the MAC address requested through +the `NetworkSelectionElement` does not conflict with any other configured address on the UDN +(including addresses consumed by OVN-Kubernetes). + +OVN-Kubernetes already persists the IP and MAC addresses in the `k8s.ovn.org/pod-networks` annotation for each pod: + +```cgo +// PodAnnotation describes the assigned network details for a single pod network. (The +// actual annotation may include the equivalent of multiple PodAnnotations.) +type PodAnnotation struct { +// IPs are the pod's assigned IP addresses/prefixes +IPs []*net.IPNet +// MAC is the pod's assigned MAC address +MAC net.HardwareAddr +// Gateways are the pod's gateway IP addresses; note that there may be +// fewer Gateways than IPs. +Gateways []net.IP + +// GatewayIPv6LLA is the IPv6 Link Local Address for the pod's gateway, that is the address +// that will be set as gateway with router advertisements +// generated from the gateway router from the node where the pod is running. +GatewayIPv6LLA net.IP + +// Routes are additional routes to add to the pod's network namespace +Routes []PodRoute + +// TunnelID assigned to each pod for layer2 secondary networks +TunnelID int + +// Role defines what role this network plays for the given pod. 
+// Expected values are: +// (1) "primary" if this network is the primary network of the pod. +// The "default" network is the primary network of any pod usually +// unless user-defined-network-segmentation feature has been activated. +// If network segmentation feature is enabled then any user defined +// network can be the primary network of the pod. +// (2) "secondary" if this network is the secondary network of the pod. +// Only user defined networks can be secondary networks for a pod. +// (3) "infrastructure-locked" is applicable only to "default" network if +// a user defined network is the "primary" network for this pod. This +// signifies the "default" network is only used for probing and +// is otherwise locked for all intents and purposes. +// At a given time a pod can have only 1 network with role:"primary" +Role string +} +``` + +This annotation will be used to build an initial cache of allocated addresses at startup, which will then be updated +dynamically at runtime and used for conflict detection. +A similar approach is required for IP address conflict detection. +When a conflict is detected the pod should not start and an appropriate event should be emitted. + +When the `NetworkSelectionElement` contains an `IPAMClaimReference` the referenced IPAMClaim should +reflect the IP allocation status including error reporting through the newly introduced +`Conditions` status field. +In the opposite scenario where the `NetworkSelectionElement` does not specify the `IPAMClaimReference` +the IP allocation is not persisted when the pod is removed. + +### Testing Details + +The following scenarios should be covered in testing: + +* VM workloads import into OVN-Kubernetes with no changes to the instances network configuration. +* Imported VM workloads can live-migrate to another node without any additional traffic disruption. +* 'v1.multus-cni.io/default-network' cannot be changed after the pod was created. 
+* It should be possible to configure the pods MAC or the IP address without configuring the other. +* When `reservedSubnets` is configured automatic IP allocation should not use addresses specified in it. +* It should be possible to configure the pods IP address using the 'v1.multus-cni.io/default-network' +even if the address is a part of the `reservedSubnets`. +* Requesting an IP address and default gateway IP that is not a part of the networks subnet should fail. +* Detect MAC and IP address conflicts between the requested addresses for a newly created pods and the addresses that +are already allocated in the network. +* After configuring custom default gateway and management addresses on a Layer2 UDN the previous default +IPs can be consumed by workloads(e.g. for 10.0.0.0/16 network create pods with 10.0.0.1 and 10.0.0.2 addresses). +* Modifying the default gateway and management addresses on a Layer2 UDN should not be possible after the network +was created. + +The scenarios mentioned above have to cover both IPv4 and IPv6 IP families. + +### Documentation Details + +## Risks, Known Limitations and Mitigations + +* Modifying the 'v1.multus-cni.io/default-network' value after the pod was created could have unpredictable +consequences. +To mitigate this introduce a Validating Admission Policy described in [Implementation Details](#implementation-details). + +* By allowing users to specify the IP and MAC addresses for the pods there is a risk of conflicts. +To mitigate this OVN-Kubernetes will check that the requested addresses are not currently used in the UDN. +There is still a risk that the user picks an address that's consumed by something outside of the UDN but that's beyond +what OVN-Kubernetes controls and can check. + +* The dynamic, per-node subnet allocation in Layer3 UDNs, where each node has a unique default gateway and +management IP, makes user-specified UDN gateway/management IPs and static pod IP/MAC assignments very complex. 
This +enhancement will not support Layer3 UDNs. + +* BGP support today is limited to cluster UDNs, to ensure a non-NATed traffic for pods with predefined addresses +the user has to use a cluster UDN to configure the network. This is a limitation unrelated to this enhancement +and it is possible it will be solved in the future. + +* By consuming the 'v1.multus-cni.io/default-network' annotation for altering the primary UDNs pod configuration the +user won't be able to use it for configuring the cluster default network attachment. This is acceptable as there is +currently no support for modifying the cluster default network through this annotation while using primary UDNs. +If there is a requirement in the future another mechanism can be considered. + +* OVN-Kubernetes computes MAC addresses from pod IPs rather than allocating them, which creates potential +MAC address conflicts in a potential scenario where a MAC address previously used by a stopped VM gets consumed by +OVN-Kubernetes for a dynamically allocated IP. To mitigate these conflicts, users will have to use a different +MAC address and recreate the workload. For importing workloads that already use this prefix, a future enhancement +could add a field to the Layer2 spec allowing users to specify a custom MAC prefix for the UDN. + +## OVN Kubernetes Version Skew + +## Alternatives + +* Instead of the [Pod network identity](#pod-network-identity) approach, we could expand the +IPAMClaim API. It currently lacks IP request capabilities, and using IPAMClaim for MAC addresses +is confusing. Introducing a new API would mean deprecating the IPAMClaim, while managing +upgrades and supporting both solutions for a period of time. This requires significant effort, which +is not feasible at this time. 
+ +* As described in the [NetworkSelectionElement annotation](#networkselectionelement-annotation) section, using the +`v1.multus-cni.io/default-network` annotation means Multus strictly requires this annotation's value to reference an +existing NAD. An alternative to the proposed approach would be to reference the NAD that defines the primary network. +It was discarded as it would require OVN-Kubernetes to modify the CNI handling logic because multus +would target the CNI requests towards the custom network. Additionally it would require users to determine the exact +NAD name and namespace for every primary UDN pod needing custom MAC, IP, or IPAMClaim. + +## References + +* [IPAMClaim CRD doc](https://docs.google.com/document/d/1OQIJIrCtsYpR5O44w0hpoJ2TyKBz1Du-KhRT4RtrAjk) - `IPAM allocation on behalf of other entities` section + +* IPAMClaim status conditions pull request: diff --git a/go-controller/.golangci.yml b/go-controller/.golangci.yml index 8f60edab95..91be64adc3 100644 --- a/go-controller/.golangci.yml +++ b/go-controller/.golangci.yml @@ -33,6 +33,7 @@ linters-settings: - default - prefix(k8s.io,sigs.k8s.io) - prefix(github.com/ovn-org) + - prefix(github.com/ovn-kubernetes) - localmodule - dot @@ -41,7 +42,7 @@ linters-settings: disable: - fieldalignment - shadow - + importas: no-unaliased: true alias: @@ -57,9 +58,13 @@ linters-settings: - pkg: sigs.k8s.io/controller-runtime alias: ctrl # Other frequently used deps - - pkg: github.com/ovn-org/libovsdb/ovsdb + - pkg: github.com/ovn-kubernetes/libovsdb/ovsdb alias: "" - + - pkg: github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util + alias: nodeutil + - pkg: github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types + alias: nodetypes + revive: rules: # TODO: enable recommended (default) revive rules diff --git a/go-controller/Makefile b/go-controller/Makefile index 9b2a59e595..f27bb979e5 100644 --- a/go-controller/Makefile +++ b/go-controller/Makefile @@ -22,8 +22,7 @@ else 
CONTAINER_RUNTIME=docker endif CONTAINER_RUNNABLE ?= $(shell $(CONTAINER_RUNTIME) -v > /dev/null 2>&1; echo $$?) -# FIXME(tssurya): In one week when OVN 24.09 is released change the schema version -OVN_SCHEMA_VERSION ?= 8efac26f6637fc +OVN_SCHEMA_VERSION ?= v25.03.1 OVS_VERSION ?= v2.17.0 ifeq ($(NOROOT),TRUE) C_ARGS = -e NOROOT=TRUE @@ -93,9 +92,17 @@ clean: lint: ifeq ($(CONTAINER_RUNNABLE), 0) - @GOPATH=${GOPATH} ./hack/lint.sh $(CONTAINER_RUNTIME) + @GOPATH=${GOPATH} ./hack/lint.sh $(CONTAINER_RUNTIME) || { echo "lint failed! Try running 'make lint-fix'"; exit 1; } else - echo "linter can only be run within a container since it needs a specific golangci-lint version" + echo "linter can only be run within a container since it needs a specific golangci-lint version"; exit 1 +endif + +lint-fix: +ifeq ($(CONTAINER_RUNNABLE), 0) + @GOPATH=${GOPATH} ./hack/lint.sh $(CONTAINER_RUNTIME) fix || { echo "ERROR: lint fix failed! There is a bug that changes file ownership to root \ + when this happens. 
To fix it, simply run 'chown -R : *' from the repo root."; exit 1; } +else + echo "linter can only be run within a container since it needs a specific golangci-lint version"; exit 1 endif gofmt: diff --git a/go-controller/cmd/ovnkube/ovnkube.go b/go-controller/cmd/ovnkube/ovnkube.go index 2dc1189c62..4211a7fec5 100644 --- a/go-controller/cmd/ovnkube/ovnkube.go +++ b/go-controller/cmd/ovnkube/ovnkube.go @@ -22,7 +22,7 @@ import ( "k8s.io/klog/v2" kexec "k8s.io/utils/exec" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/go.mod b/go-controller/go.mod index a7b86b1ed1..72e89c3b7a 100644 --- a/go-controller/go.mod +++ b/go-controller/go.mod @@ -23,7 +23,7 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.0 github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 - github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha + github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 github.com/k8snetworkplumbingwg/sriovnet v1.2.1-0.20230427090635-4929697df2dc @@ -38,7 +38,7 @@ require ( github.com/onsi/gomega v1.36.1 github.com/openshift/api v0.0.0-20231120222239-b86761094ee3 github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a - github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 + github.com/ovn-kubernetes/libovsdb v0.8.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/safchain/ethtool v0.3.1-0.20231027162144-83e5e0097c91 @@ -47,9 +47,9 @@ require ( github.com/urfave/cli/v2 v2.27.2 github.com/vishvananda/netlink v1.3.1-0.20250206174618-62fb240731fa golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 - golang.org/x/net v0.30.0 - 
golang.org/x/sync v0.8.0 - golang.org/x/sys v0.26.0 + golang.org/x/net v0.38.0 + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 golang.org/x/time v0.7.0 google.golang.org/grpc v1.65.0 google.golang.org/grpc/security/advancedtls v0.0.0-20240425232638-1e8b9b7fc655 @@ -57,12 +57,12 @@ require ( gopkg.in/fsnotify/fsnotify.v1 v1.4.7 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 + k8s.io/api v0.32.5 + k8s.io/apimachinery v0.32.5 + k8s.io/client-go v0.32.5 k8s.io/component-helpers v0.32.3 k8s.io/klog/v2 v2.130.1 - k8s.io/kubernetes v1.32.3 + k8s.io/kubernetes v1.32.6 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 kubevirt.io/api v1.0.0-alpha.0 sigs.k8s.io/controller-runtime v0.20.3 @@ -124,10 +124,10 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.28.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect diff --git a/go-controller/go.sum b/go-controller/go.sum index 93bf3489f5..2af1883f7e 100644 --- a/go-controller/go.sum +++ b/go-controller/go.sum @@ -494,8 +494,8 @@ github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2/go.mod h1:kE8gK5X0CIm github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= github.com/k8snetworkplumbingwg/govdpa 
v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha h1:b3iHeks/KTzhG2dNanaUZcFEJwJbYBZY16jxCaVv9i8= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha/go.mod h1:MGaMX1tJ7MlHDee4/xmqp3guQh+eDiuCLAauqD9K11Q= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzAXxe/2o8VNiVcAJLrKzlinILQo= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= @@ -639,8 +639,8 @@ github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a h1:4FVrw8hz0Wb github.com/openshift/client-go v0.0.0-20231121143148-910ca30a1a9a/go.mod h1:arApQobmOjZqtxw44TwnQdUCH+t9DgZ8geYPFqksHws= github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= -github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 h1:OoDvzyaK7F/ZANIIFOgb4Haj7mye3Hle0fYZZNdidSs= -github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20/go.mod h1:dJbxEaalQl83nn904K32FaMjlH/qOObZ0bj4ejQ78AI= +github.com/ovn-kubernetes/libovsdb v0.8.0 h1:cWhqWb5rCiS3yTJ6VJ7s85cElE1NWWJ2XksPGLd5WII= +github.com/ovn-kubernetes/libovsdb v0.8.0/go.mod h1:8nqWvM5pjHRbI5K6Uy/yuA5MdhCnGhNFH5fsSjZD8Rc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -841,8 +841,8 @@ 
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -934,8 +934,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -945,8 +945,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -958,8 +958,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1047,14 +1047,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1064,8 +1064,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1317,8 +1317,8 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.7/go.mod h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= +k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -1326,8 +1326,8 @@ k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRp 
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.7/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= +k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1335,8 +1335,8 @@ k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.7/go.mod h1:pGU/tWSzzvsYT7M3npHhoZ3Jh9qJTTIvFvDtWuW31dw= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= +k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= k8s.io/code-generator v0.22.7/go.mod h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= @@ -1368,8 +1368,8 @@ k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lV k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f 
h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= -k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA= -k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= +k8s.io/kubernetes v1.32.6 h1:tp1gRjOqZjaoFBek5PN6eSmODdS1QRrH5UKiFP8ZByg= +k8s.io/kubernetes v1.32.6/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/go-controller/hack/build-go.sh b/go-controller/hack/build-go.sh index 17b963adea..13a12e0bf9 100755 --- a/go-controller/hack/build-go.sh +++ b/go-controller/hack/build-go.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e GO=${GO:-go} diff --git a/go-controller/hack/init.sh b/go-controller/hack/init.sh index 69dcb8f73e..98a210ea71 100755 --- a/go-controller/hack/init.sh +++ b/go-controller/hack/init.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash OUT_DIR=${OUT_DIR:-_output} diff --git a/go-controller/hack/lint.sh b/go-controller/hack/lint.sh index 5ac32e96dd..57f4695827 100755 --- a/go-controller/hack/lint.sh +++ b/go-controller/hack/lint.sh @@ -1,14 +1,18 @@ #!/usr/bin/env bash - VERSION=v1.60.3 +extra_flags="" if [ "$#" -ne 1 ]; then + if [ "$#" -eq 2 ] && [ "$2" == "fix" ]; then + extra_flags="--fix" + else echo "Expected command line argument - container runtime (docker/podman) got $# arguments: $@" exit 1 + fi fi $1 run --security-opt label=disable --rm \ -v ${HOME}/.cache/golangci-lint:/cache -e GOLANGCI_LINT_CACHE=/cache \ -v $(pwd):/app -w /app -e GO111MODULE=on docker.io/golangci/golangci-lint:${VERSION} \ golangci-lint run --verbose --print-resources-usage \ - 
--modules-download-mode=vendor --timeout=15m0s && \ - echo "lint OK!" + --modules-download-mode=vendor --timeout=15m0s ${extra_flags} && \ + echo "lint OK!" \ No newline at end of file diff --git a/go-controller/hack/regenerate_vendor_mocks.sh b/go-controller/hack/regenerate_vendor_mocks.sh index a94d4481cd..c1e94c89e7 100755 --- a/go-controller/hack/regenerate_vendor_mocks.sh +++ b/go-controller/hack/regenerate_vendor_mocks.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash workdir=$(cd ../ && pwd) substitute_string='pkg/testing/mocks' diff --git a/go-controller/hack/test-go.sh b/go-controller/hack/test-go.sh index b41bdc8817..f2df39f672 100755 --- a/go-controller/hack/test-go.sh +++ b/go-controller/hack/test-go.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e source "$(dirname "${BASH_SOURCE}")/init.sh" diff --git a/go-controller/hack/update-modelgen.sh b/go-controller/hack/update-modelgen.sh index 18c6f1a3cd..c17239f669 100755 --- a/go-controller/hack/update-modelgen.sh +++ b/go-controller/hack/update-modelgen.sh @@ -4,12 +4,12 @@ set -o pipefail # generate ovsdb bindings if ! 
( command -v modelgen > /dev/null ); then - echo "modelgen not found, installing github.com/ovn-org/libovsdb/cmd/modelgen" + echo "modelgen not found, installing github.com/ovn-kubernetes/libovsdb/cmd/modelgen" olddir="${PWD}" builddir="$(mktemp -d)" cd "${builddir}" # ensure the hash value is not outdated, if wrong bindings are being generated re-install modelgen - GO111MODULE=on go install github.com/ovn-org/libovsdb/cmd/modelgen@v0.7.0 + GO111MODULE=on go install github.com/ovn-kubernetes/libovsdb/cmd/modelgen@v0.8.0 cd "${olddir}" if [[ "${builddir}" == /tmp/* ]]; then #paranoia rm -rf "${builddir}" diff --git a/go-controller/hack/verify-go-mod-vendor.sh b/go-controller/hack/verify-go-mod-vendor.sh index 39ce9104bc..fd865e965c 100755 --- a/go-controller/hack/verify-go-mod-vendor.sh +++ b/go-controller/hack/verify-go-mod-vendor.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -o errexit # Nozero exit code of any of the commands below will fail the test. set -o nounset set -o pipefail diff --git a/go-controller/hack/verify-gofmt.sh b/go-controller/hack/verify-gofmt.sh index de5f65452b..d2c73e47d3 100755 --- a/go-controller/hack/verify-gofmt.sh +++ b/go-controller/hack/verify-gofmt.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -o errexit set -o nounset diff --git a/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go b/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go index 5de9b75391..339bc289f7 100644 --- a/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go +++ b/go-controller/hybrid-overlay/pkg/controller/ho_node_windows.go @@ -57,7 +57,7 @@ func newNodeController(kube kube.Interface, "UDP port. 
Please make sure you install all the KB updates on your system.") } - node, err := kube.GetNode(nodeName) + node, err := kube.GetNodeForWindows(nodeName) if err != nil { return nil, err } @@ -345,7 +345,7 @@ func (n *NodeController) initSelf(node *corev1.Node, nodeSubnet *net.IPNet) erro } // Add existing nodes - nodes, err := n.kube.GetNodes() + nodes, err := n.kube.GetNodesForWindows() if err != nil { return fmt.Errorf("error in initializing/fetching nodes: %v", err) } @@ -370,7 +370,7 @@ func (n *NodeController) uninitSelf(node *corev1.Node) error { networkName, n.networkID, node.Name) // Remove existing nodes - nodes, err := n.kube.GetNodes() + nodes, err := n.kube.GetNodesForWindows() if err != nil { return fmt.Errorf("failed to get nodes: %v", err) } diff --git a/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go b/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go index 3c74239db0..df8a9559c8 100644 --- a/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go +++ b/go-controller/hybrid-overlay/pkg/controller/ovn_node_linux.go @@ -261,7 +261,7 @@ func (n *NodeController) AddNode(node *corev1.Node) error { } else { // Make sure the local node has been initialized before adding a hybridOverlay remote node if atomic.LoadUint32(n.initState) < hotypes.DistributedRouterInitialized { - localNode, err := n.kube.GetNode(n.nodeName) + localNode, err := n.nodeLister.Get(n.nodeName) if err != nil { return fmt.Errorf("cannot get local node: %s: %w", n.nodeName, err) } diff --git a/go-controller/observability-lib/ovsdb/bridge.go b/go-controller/observability-lib/ovsdb/bridge.go index d0135c4886..d918918bb0 100644 --- a/go-controller/observability-lib/ovsdb/bridge.go +++ b/go-controller/observability-lib/ovsdb/bridge.go @@ -3,7 +3,7 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BridgeTable = "Bridge" diff --git 
a/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go b/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go index 57a26e805d..b4b67f6055 100644 --- a/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go +++ b/go-controller/observability-lib/ovsdb/flow_sample_collector_set.go @@ -3,7 +3,7 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" diff --git a/go-controller/observability-lib/ovsdb/interface.go b/go-controller/observability-lib/ovsdb/interface.go index e9f350995c..9e59b20738 100644 --- a/go-controller/observability-lib/ovsdb/interface.go +++ b/go-controller/observability-lib/ovsdb/interface.go @@ -3,7 +3,7 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const InterfaceTable = "Interface" diff --git a/go-controller/observability-lib/ovsdb/observ_model.go b/go-controller/observability-lib/ovsdb/observ_model.go index 22547a3f8c..4667acf5d5 100644 --- a/go-controller/observability-lib/ovsdb/observ_model.go +++ b/go-controller/observability-lib/ovsdb/observ_model.go @@ -1,6 +1,6 @@ package ovsdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" // ObservDatabaseModel returns the DatabaseModel object to be used by observability library. 
func ObservDatabaseModel() (model.ClientDBModel, error) { diff --git a/go-controller/observability-lib/sampledecoder/db_client.go b/go-controller/observability-lib/sampledecoder/db_client.go index 5587646356..9d65645601 100644 --- a/go-controller/observability-lib/sampledecoder/db_client.go +++ b/go-controller/observability-lib/sampledecoder/db_client.go @@ -10,8 +10,8 @@ import ( "k8s.io/klog/v2/textlogger" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/observability-lib/sampledecoder/sample_decoder.go b/go-controller/observability-lib/sampledecoder/sample_decoder.go index 341a0d1c18..d92c03b3e8 100644 --- a/go-controller/observability-lib/sampledecoder/sample_decoder.go +++ b/go-controller/observability-lib/sampledecoder/sample_decoder.go @@ -7,7 +7,7 @@ import ( "fmt" "strings" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/model" "github.com/ovn-org/ovn-kubernetes/go-controller/observability-lib/ovsdb" diff --git a/go-controller/pkg/clustermanager/clustermanager_test.go b/go-controller/pkg/clustermanager/clustermanager_test.go index f97de8fc3f..66535f4c8a 100644 --- a/go-controller/pkg/clustermanager/clustermanager_test.go +++ b/go-controller/pkg/clustermanager/clustermanager_test.go @@ -34,10 +34,9 @@ const ( var _ = ginkgo.Describe("Cluster Manager", func() { var ( - app *cli.App - f *factory.WatchFactory - stopChan chan struct{} - wg *sync.WaitGroup + app *cli.App + f *factory.WatchFactory + wg *sync.WaitGroup ) const ( @@ -54,12 +53,10 @@ var _ = ginkgo.Describe("Cluster Manager", func() { app = cli.NewApp() app.Name = "test" app.Flags = config.Flags - stopChan = make(chan 
struct{}) wg = &sync.WaitGroup{} }) ginkgo.AfterEach(func() { - close(stopChan) if f != nil { f.Shutdown() } @@ -1436,4 +1433,102 @@ var _ = ginkgo.Describe("Cluster Manager", func() { }) }) + ginkgo.Context("starting the cluster manager", func() { + const networkName = "default" + + var fakeClient *util.OVNClusterManagerClientset + + ginkgo.BeforeEach(func() { + fakeClient = util.GetOVNClientset().GetClusterManagerClientset() + }) + + ginkgo.When("the required features are not enabled", func() { + ginkgo.It("does *not* automatically provision a NAD for the default network", func() { + app.Action = func(ctx *cli.Context) error { + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + f, err = factory.NewClusterManagerWatchFactory(fakeClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + clusterMngr, err := clusterManager(fakeClient, f) + gomega.Expect(clusterMngr).NotTo(gomega.BeNil()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(clusterMngr.Start(ctx.Context)).To(gomega.Succeed()) + + _, err = fakeClient.NetworkAttchDefClient. + K8sCniCncfIoV1(). + NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace). 
+ Get( + context.Background(), + networkName, + metav1.GetOptions{}, + ) + gomega.Expect(err).To( + gomega.MatchError("network-attachment-definitions.k8s.cni.cncf.io \"default\" not found"), + ) + + return nil + } + gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed()) + }) + }) + + ginkgo.When("the multi-network, network-segmentation, and preconfigured-udn-addresses features are enabled", func() { + ginkgo.BeforeEach(func() { + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + config.OVNKubernetesFeature.EnablePreconfiguredUDNAddresses = true + }) + + ginkgo.It("automatically provisions a NAD for the default network", func() { + app.Action = func(ctx *cli.Context) error { + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + f, err = factory.NewClusterManagerWatchFactory(fakeClient) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + clusterMngr, err := clusterManager(fakeClient, f) + gomega.Expect(clusterMngr).NotTo(gomega.BeNil()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + c, cancel := context.WithCancel(ctx.Context) + defer cancel() + gomega.Expect(clusterMngr.Start(c)).To(gomega.Succeed()) + defer clusterMngr.Stop() + + nad, err := fakeClient.NetworkAttchDefClient. + K8sCniCncfIoV1(). + NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace). 
+						Get(
+							context.Background(),
+							networkName,
+							metav1.GetOptions{},
+						)
+					gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+					const expectedNADContents = `{"cniVersion": "0.4.0", "name": "ovn-kubernetes", "type": "ovn-k8s-cni-overlay"}`
+					gomega.Expect(nad.Spec.Config).To(gomega.Equal(expectedNADContents))
+
+					return nil
+				}
+				gomega.Expect(app.Run([]string{app.Name})).To(gomega.Succeed())
+			})
+		})
+	})
+})
+
+func clusterManager(client *util.OVNClusterManagerClientset, f *factory.WatchFactory) (*ClusterManager, error) {
+	if err := f.Start(); err != nil {
+		return nil, fmt.Errorf("failed to start the CM watch factory: %w", err)
+	}
+
+	clusterMngr, err := NewClusterManager(client, f, "identity", nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create the cluster manager: %w", err)
+	}
+
+	return clusterMngr, nil
+}
diff --git a/go-controller/pkg/clustermanager/network_cluster_controller.go b/go-controller/pkg/clustermanager/network_cluster_controller.go
index af7edc661f..7193310e4c 100644
--- a/go-controller/pkg/clustermanager/network_cluster_controller.go
+++ b/go-controller/pkg/clustermanager/network_cluster_controller.go
@@ -16,6 +16,7 @@ import (
 	cache "k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/retry"
+	k8snodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/klog/v2"
 
 	"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/id"
@@ -475,7 +476,7 @@ func (ncc *networkClusterController) Reconcile(netInfo util.NetInfo) error {
 			klog.Errorf("Failed to reconcile network %s: %v", ncc.GetNetworkName(), err)
 		}
 		if reconcilePendingPods && ncc.retryPods != nil {
-			if err := objretry.RequeuePendingPods(ncc.kube, ncc.GetNetInfo(), ncc.retryPods); err != nil {
+			if err := objretry.RequeuePendingPods(ncc.watchFactory, ncc.GetNetInfo(), ncc.retryPods); err != nil {
 				klog.Errorf("Failed to requeue pending pods for network %s: %v", ncc.GetNetworkName(), err)
 			}
 		}
@@ -576,7 +577,10 @@ func (h 
*networkClusterControllerEventHandler) UpdateResource(oldObj, newObj int // 1. we missed an add event (bug in kapi informer code) // 2. a user removed the annotation on the node // Either way to play it safe for now do a partial json unmarshal check - if !nodeFailed && util.NoHostSubnet(oldNode) != util.NoHostSubnet(newNode) && !h.ncc.nodeAllocator.NeedsNodeAllocation(newNode) { + _, nodeCondition := k8snodeutil.GetNodeCondition(&newNode.Status, corev1.NodeNetworkUnavailable) + nodeNetworkUnavailable := nodeCondition != nil && nodeCondition.Status == corev1.ConditionTrue + if !nodeFailed && util.NoHostSubnet(oldNode) == util.NoHostSubnet(newNode) && + !h.ncc.nodeAllocator.NeedsNodeAllocation(newNode) && !nodeNetworkUnavailable { // no other node updates would require us to reconcile again return nil } diff --git a/go-controller/pkg/clustermanager/node/node_allocator.go b/go-controller/pkg/clustermanager/node/node_allocator.go index 63593618b2..83c3d80fde 100644 --- a/go-controller/pkg/clustermanager/node/node_allocator.go +++ b/go-controller/pkg/clustermanager/node/node_allocator.go @@ -195,27 +195,24 @@ func (na *NodeAllocator) NeedsNodeAllocation(node *corev1.Node) bool { } // ovn node check - // allocation is all or nothing, so if one field was allocated from: - // nodeSubnets, joinSubnet, layer 2 tunnel id, then all of them were if na.hasNodeSubnetAllocation() { - if util.HasNodeHostSubnetAnnotation(node, na.netInfo.GetNetworkName()) { - return false + if !util.HasNodeHostSubnetAnnotation(node, na.netInfo.GetNetworkName()) { + return true } } - if na.hasJoinSubnetAllocation() { - if util.HasNodeGatewayRouterJoinNetwork(node, na.netInfo.GetNetworkName()) { - return false + if !util.HasNodeGatewayRouterJoinNetwork(node, na.netInfo.GetNetworkName()) { + return true } } if util.IsNetworkSegmentationSupportEnabled() && na.netInfo.IsPrimaryNetwork() && util.DoesNetworkRequireTunnelIDs(na.netInfo) { - if util.HasUDNLayer2NodeGRLRPTunnelID(node, 
na.netInfo.GetNetworkName()) { - return false + if !util.HasUDNLayer2NodeGRLRPTunnelID(node, na.netInfo.GetNetworkName()) { + return true } } - return true + return false } diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller.go b/go-controller/pkg/clustermanager/routeadvertisements/controller.go index 04daa6cde1..cffbb3425e 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller.go @@ -373,10 +373,6 @@ func (c *Controller) generateFRRConfigurations(ra *ratypes.RouteAdvertisements) return nil, nil, fmt.Errorf("%w: selected network %q has unsupported topology %q", errConfig, networkName, network.TopologyType()) } - if config.Gateway.Mode == config.GatewayModeLocal && network.TopologyType() == types.Layer2Topology { - return nil, nil, fmt.Errorf("%w: BGP is currently not supported for Layer2 networks in local gateway mode, network: %s", errConfig, network.GetNetworkName()) - } - if advertisements.Has(ratypes.EgressIP) && network.TopologyType() == types.Layer2Topology { return nil, nil, fmt.Errorf("%w: EgressIP advertisement is currently not supported for Layer2 networks, network: %s", errConfig, network.GetNetworkName()) } @@ -597,14 +593,13 @@ func (c *Controller) generateFRRConfiguration( matchedNetworks sets.Set[string], ) (*frrtypes.FRRConfiguration, error) { routers := []frrtypes.Router{} - advertisements := sets.New(ra.Spec.Advertisements...) 
// go over the source routers for i, router := range source.Spec.BGP.Routers { targetVRF := ra.Spec.TargetVRF var matchedVRF, matchedNetwork string - var receivePrefixes, advertisePrefixes []string + var advertisePrefixes []string // We will use the router if: // - the router VRF matches the target VRF @@ -612,33 +607,25 @@ func (c *Controller) generateFRRConfiguration( // Prepare each scenario with a switch statement and check after that switch { case targetVRF == "auto" && router.VRF == "": - // match on default network/VRF, advertise node prefixes and receive - // any prefix of default network. + // match on default network/VRF, advertise node prefixes matchedVRF = "" matchedNetwork = types.DefaultNetworkName advertisePrefixes = selectedNetworks.hostNetworkSubnets[matchedNetwork] - receivePrefixes = selectedNetworks.networkSubnets[matchedNetwork] case targetVRF == "auto": - // match router.VRF to network.VRF, advertise node prefixes and - // receive any prefix of the matched network + // match router.VRF to network.VRF, advertise node prefixes matchedVRF = router.VRF matchedNetwork = selectedNetworks.networkVRFs[matchedVRF] advertisePrefixes = selectedNetworks.hostNetworkSubnets[matchedNetwork] - receivePrefixes = selectedNetworks.networkSubnets[matchedNetwork] case targetVRF == "": - // match on default network/VRF, advertise node prefixes and - // receive any prefix of selected networks + // match on default network/VRF, advertise node prefixes matchedVRF = "" matchedNetwork = types.DefaultNetworkName advertisePrefixes = selectedNetworks.hostSubnets - receivePrefixes = selectedNetworks.subnets default: - // match router.VRF to network.VRF, advertise node prefixes and - // receive any prefix of selected networks + // match router.VRF to network.VRF, advertise node prefixes matchedVRF = targetVRF matchedNetwork = selectedNetworks.networkVRFs[matchedVRF] advertisePrefixes = selectedNetworks.hostSubnets - receivePrefixes = selectedNetworks.subnets } if matchedVRF 
!= router.VRF || len(advertisePrefixes) == 0 { // either this router VRF does not match the target VRF or we don't @@ -673,7 +660,6 @@ func (c *Controller) generateFRRConfiguration( isIPV6 := utilnet.IsIPv6String(neighbor.Address) advertisePrefixes := util.MatchAllIPNetsStringFamily(isIPV6, advertisePrefixes) - receivePrefixes := util.MatchAllIPNetsStringFamily(isIPV6, receivePrefixes) if len(advertisePrefixes) == 0 { continue } @@ -684,22 +670,6 @@ func (c *Controller) generateFRRConfiguration( Prefixes: advertisePrefixes, }, } - neighbor.ToReceive = frrtypes.Receive{ - Allowed: frrtypes.AllowedInPrefixes{ - Mode: frrtypes.AllowRestricted, - }, - } - if advertisements.Has(ratypes.PodNetwork) { - for _, prefix := range receivePrefixes { - neighbor.ToReceive.Allowed.Prefixes = append(neighbor.ToReceive.Allowed.Prefixes, - frrtypes.PrefixSelector{ - Prefix: prefix, - LE: selectedNetworks.prefixLength[prefix], - GE: selectedNetworks.prefixLength[prefix], - }, - ) - } - } targetRouter.Neighbors = append(targetRouter.Neighbors, neighbor) } if len(targetRouter.Neighbors) == 0 { @@ -955,10 +925,18 @@ func (c *Controller) updateRAStatus(ra *ratypes.RouteAdvertisements, hadUpdates return nil } + var updateStatus bool condition := meta.FindStatusCondition(ra.Status.Conditions, "Accepted") - updateStatus := hadUpdates || condition == nil || condition.ObservedGeneration != ra.Generation - updateStatus = updateStatus || err != nil - + switch { + case condition == nil: + fallthrough + case condition.ObservedGeneration != ra.Generation: + fallthrough + case (err == nil) != (condition.Status == metav1.ConditionTrue): + fallthrough + case hadUpdates: + updateStatus = true + } if !updateStatus { return nil } @@ -1012,7 +990,7 @@ func (c *Controller) getSelectedNADs(networkSelectors apitypes.NetworkSelectors) case apitypes.DefaultNetwork: // if we are selecting the default networkdefault network label, // make sure a NAD exists for it - nad, err := c.getOrCreateDefaultNetworkNAD() + 
nad, err := util.EnsureDefaultNetworkNAD(c.nadLister, c.nadClient) if err != nil { return nil, fmt.Errorf("failed to get/create default network NAD: %w", err) } @@ -1043,34 +1021,6 @@ func (c *Controller) getSelectedNADs(networkSelectors apitypes.NetworkSelectors) return selected, nil } -// getOrCreateDefaultNetworkNAD ensure that a well-known NAD exists for the -// default network in ovn-k namespace. -func (c *Controller) getOrCreateDefaultNetworkNAD() (*nadtypes.NetworkAttachmentDefinition, error) { - nad, err := c.nadLister.NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Get(types.DefaultNetworkName) - if err != nil && !apierrors.IsNotFound(err) { - return nil, err - } - if nad != nil { - return nad, nil - } - return c.nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Create( - context.Background(), - &nadtypes.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: types.DefaultNetworkName, - Namespace: config.Kubernetes.OVNConfigNamespace, - }, - Spec: nadtypes.NetworkAttachmentDefinitionSpec{ - Config: fmt.Sprintf("{\"cniVersion\": \"0.4.0\", \"name\": \"ovn-kubernetes\", \"type\": \"%s\"}", config.CNI.Plugin), - }, - }, - // note we don't set ourselves as field manager for this create as we - // want to process the resulting event that would otherwise be filtered - // out in nadNeedsUpdate - metav1.CreateOptions{}, - ) -} - // getEgressIPsByNodesByNetworks iterates all existing egress IPs that apply to // any of the provided networks and returns a "node -> network -> eips" // map. 
diff --git a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go index 03e9391888..c03c851808 100644 --- a/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go +++ b/go-controller/pkg/clustermanager/routeadvertisements/controller_test.go @@ -47,6 +47,7 @@ type testRA struct { SelectsDefault bool AdvertisePods bool AdvertiseEgressIPs bool + Status *metav1.ConditionStatus } func (tra testRA) RouteAdvertisements() *ratypes.RouteAdvertisements { @@ -92,6 +93,9 @@ func (tra testRA) RouteAdvertisements() *ratypes.RouteAdvertisements { MatchLabels: tra.FRRConfigurationSelector, } } + if tra.Status != nil { + ra.Status.Conditions = []metav1.Condition{{Type: "Accepted", Status: *tra.Status}} + } return ra } @@ -148,7 +152,6 @@ type testNeighbor struct { ASN uint32 Address string DisableMP *bool - Receive []string Advertise []string } @@ -157,11 +160,6 @@ func (tn testNeighbor) Neighbor() frrapi.Neighbor { ASN: tn.ASN, Address: tn.Address, DisableMP: true, - ToReceive: frrapi.Receive{ - Allowed: frrapi.AllowedInPrefixes{ - Mode: frrapi.AllowRestricted, - }, - }, ToAdvertise: frrapi.Advertise{ Allowed: frrapi.AllowedOutPrefixes{ Mode: frrapi.AllowRestricted, @@ -172,31 +170,6 @@ func (tn testNeighbor) Neighbor() frrapi.Neighbor { if tn.DisableMP != nil { n.DisableMP = *tn.DisableMP } - for _, receive := range tn.Receive { - sep := strings.LastIndex(receive, "/") - if sep == -1 { - continue - } - if isLayer2 := strings.Count(receive, "/") == 1; isLayer2 { - n.ToReceive.Allowed.Prefixes = append(n.ToReceive.Allowed.Prefixes, - frrapi.PrefixSelector{ - Prefix: receive, - }, - ) - continue - } - - first := receive[:sep] - last := receive[sep+1:] - len := ovntest.MustAtoi(last) - n.ToReceive.Allowed.Prefixes = append(n.ToReceive.Allowed.Prefixes, - frrapi.PrefixSelector{ - Prefix: first, - GE: uint32(len), - LE: uint32(len), - }, - ) - } return n } @@ -429,7 +402,7 @@ 
func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, }}, }}, }, @@ -461,8 +434,8 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24", "fd01::/64", "fd03::ffff:100:101/128"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, - {ASN: 1, Address: "fd02::ffff:100:64", Advertise: []string{"fd01::/64", "fd03::ffff:100:101/128"}, Receive: []string{"fd01::/48/64"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, + {ASN: 1, Address: "fd02::ffff:100:64", Advertise: []string{"fd01::/64", "fd03::ffff:100:101/128"}}, }}, }}, }, @@ -499,7 +472,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.2.0.0/24", "1.3.0.0/24", "1.4.0.0/16", "1.5.0.0/16"}, Imports: []string{"black", "blue", "green", "red"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.0.0/24", "1.3.0.0/24", "1.4.0.0/16", "1.5.0.0/16"}, Receive: []string{"1.2.0.0/16/24", "1.3.0.0/16/24", "1.4.0.0/16", "1.5.0.0/16"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.0.0/24", "1.3.0.0/24", "1.4.0.0/16", "1.5.0.0/16"}}, }}, {ASN: 1, VRF: "black", Imports: []string{"default"}}, {ASN: 1, VRF: "blue", Imports: []string{"default"}}, @@ -632,7 +605,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: 
map[string]string{"kubernetes.io/hostname": "node"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, }}, }, }, @@ -740,13 +713,13 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node1"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.1.1.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.1.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.1.0/24"}}, }}, {ASN: 1, VRF: "red", Prefixes: []string{"1.2.1.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.1.0/24"}, Receive: []string{"1.2.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.1.0/24"}}, }}, {ASN: 1, VRF: "green", Prefixes: []string{"1.4.0.0/16"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}, Receive: []string{"1.4.0.0/16"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}}, }}, }, }, @@ -756,7 +729,7 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"}, Routers: []*testRouter{ {ASN: 1, Prefixes: []string{"1.1.2.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.2.0/24"}, Receive: []string{"1.1.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.1.2.0/24"}}, }}, }, }, @@ -766,16 +739,48 @@ func TestController_reconcile(t *testing.T) { NodeSelector: map[string]string{"kubernetes.io/hostname": "node2"}, Routers: []*testRouter{ {ASN: 1, VRF: "red", Prefixes: []string{"1.2.2.0/24"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", 
Advertise: []string{"1.2.2.0/24"}, Receive: []string{"1.2.0.0/16/24"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.2.2.0/24"}}, }}, {ASN: 1, VRF: "green", Prefixes: []string{"1.4.0.0/16"}, Neighbors: []*testNeighbor{ - {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}, Receive: []string{"1.4.0.0/16"}}, + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.4.0.0/16"}}, }}, }, }, }, expectNADAnnotations: map[string]map[string]string{"default": {types.OvnRouteAdvertisementsKey: "[\"ra\"]"}, "red": {types.OvnRouteAdvertisementsKey: "[\"ra\"]"}}, }, + { + name: "reconciles RouteAdvertisements status even when no other updates are required", + ra: &testRA{Name: "ra", AdvertisePods: true, AdvertiseEgressIPs: true, SelectsDefault: true, Status: ptr.To(metav1.ConditionFalse)}, + frrConfigs: []*testFRRConfig{ + { + Name: "frrConfig", + Namespace: frrNamespace, + Routers: []*testRouter{ + {ASN: 1, Prefixes: []string{"1.1.1.0/24"}, Neighbors: []*testNeighbor{ + {ASN: 1, Address: "1.0.0.100"}, + }}, + }, + }, + { + Labels: map[string]string{types.OvnRouteAdvertisementsKey: "ra"}, + Annotations: map[string]string{types.OvnRouteAdvertisementsKey: "ra/frrConfig/node"}, + NodeSelector: map[string]string{"kubernetes.io/hostname": "node"}, + Routers: []*testRouter{ + {ASN: 1, Prefixes: []string{"1.0.1.1/32", "1.1.0.0/24"}, Neighbors: []*testNeighbor{ + {ASN: 1, Address: "1.0.0.100", Advertise: []string{"1.0.1.1/32", "1.1.0.0/24"}}, + }}, + }, + }, + }, + nads: []*testNAD{ + {Name: "default", Namespace: "ovn-kubernetes", Network: "default", Annotations: map[string]string{types.OvnRouteAdvertisementsKey: "[\"ra\"]"}}, + }, + nodes: []*testNode{{Name: "node", SubnetsAnnotation: "{\"default\":\"1.1.0.0/24\"}"}}, + eips: []*testEIP{{Name: "eip", EIPs: map[string]string{"node": "1.0.1.1"}}}, + reconcile: "ra", + expectAcceptedStatus: metav1.ConditionTrue, + }, { name: "fails to reconcile a secondary network", ra: &testRA{Name: "ra", AdvertisePods: true, 
NetworkSelector: map[string]string{"selected": "true"}}, @@ -1005,11 +1010,6 @@ func TestController_reconcile(t *testing.T) { c := NewController(nm.Interface(), wf, fakeClientset) - // prime the default network NAD - if defaultNAD == nil { - defaultNAD, err = c.getOrCreateDefaultNetworkNAD() - g.Expect(err).ToNot(gomega.HaveOccurred()) - } // prime the default network NAD namespace namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -1018,11 +1018,15 @@ func TestController_reconcile(t *testing.T) { } _, err = fakeClientset.KubeClient.CoreV1().Namespaces().Create(context.Background(), namespace, metav1.CreateOptions{}) g.Expect(err).ToNot(gomega.HaveOccurred()) - - // update it with the annotation that network manager would set - defaultNAD.Annotations = map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} - _, err = fakeClientset.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(defaultNAD.Namespace).Update(context.Background(), defaultNAD, metav1.UpdateOptions{}) - g.Expect(err).ToNot(gomega.HaveOccurred()) + // prime the default network NAD + if defaultNAD == nil { + defaultNAD, err = util.EnsureDefaultNetworkNAD(c.nadLister, c.nadClient) + g.Expect(err).ToNot(gomega.HaveOccurred()) + // update it with the annotation that network manager would set + defaultNAD.Annotations = map[string]string{types.OvnNetworkNameAnnotation: types.DefaultNetworkName} + _, err = fakeClientset.NetworkAttchDefClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(defaultNAD.Namespace).Update(context.Background(), defaultNAD, metav1.UpdateOptions{}) + g.Expect(err).ToNot(gomega.HaveOccurred()) + } err = wf.Start() g.Expect(err).ToNot(gomega.HaveOccurred()) @@ -1039,7 +1043,13 @@ func TestController_reconcile(t *testing.T) { ) err = nm.Start() - g.Expect(err).ToNot(gomega.HaveOccurred()) + // some test cases start with a bad RA status, avoid asserting + // initial sync in this case as it will fail + if tt.ra == nil || tt.ra.Status == 
nil || *tt.ra.Status == metav1.ConditionTrue { + g.Expect(err).ToNot(gomega.HaveOccurred()) + } else { + g.Expect(err).To(gomega.HaveOccurred()) + } // we just need the inital sync nm.Stop() diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go index e8c1d74a03..14963b9ed9 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/controller.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/controller.go @@ -37,6 +37,7 @@ import ( userdefinednetworkscheme "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned/scheme" userdefinednetworkinformer "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/informers/externalversions/userdefinednetwork/v1" userdefinednetworklister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/listers/userdefinednetwork/v1" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -149,6 +150,12 @@ func (c *Controller) Run() error { return fmt.Errorf("unable to start user-defined network controller: %v", err) } + if util.IsPreconfiguredUDNAddressesEnabled() { + if _, err := util.EnsureDefaultNetworkNAD(c.nadLister, c.nadClient); err != nil { + return fmt.Errorf("failed to ensure default network nad exists: %w", err) + } + } + return nil } @@ -389,6 +396,14 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, nil } + var role, topology string + if udn.Spec.Layer2 != nil { + role = string(udn.Spec.Layer2.Role) + } else if udn.Spec.Layer3 != nil { + role = string(udn.Spec.Layer3.Role) + } + topology = string(udn.Spec.Topology) + if !udn.DeletionTimestamp.IsZero() { // udn is being deleted if controllerutil.ContainsFinalizer(udn, template.FinalizerUserDefinedNetwork) { if err := c.deleteNAD(udn, udn.Namespace); 
err != nil { @@ -401,6 +416,7 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, fmt.Errorf("failed to remove finalizer to UserDefinedNetwork: %w", err) } klog.Infof("Finalizer removed from UserDefinedNetworks [%s/%s]", udn.Namespace, udn.Name) + metrics.DecrementUDNCount(role, topology) } return nil, nil @@ -412,6 +428,7 @@ func (c *Controller) syncUserDefinedNetwork(udn *userdefinednetworkv1.UserDefine return nil, fmt.Errorf("failed to add finalizer to UserDefinedNetwork: %w", err) } klog.Infof("Added Finalizer to UserDefinedNetwork [%s/%s]", udn.Namespace, udn.Name) + metrics.IncrementUDNCount(role, topology) } return c.updateNAD(udn, udn.Namespace) @@ -539,6 +556,16 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine cudnName := cudn.Name affectedNamespaces := c.namespaceTracker[cudnName] + var role, topology string + if cudn.Spec.Network.Layer2 != nil { + role = string(cudn.Spec.Network.Layer2.Role) + } else if cudn.Spec.Network.Layer3 != nil { + role = string(cudn.Spec.Network.Layer3.Role) + } else if cudn.Spec.Network.Localnet != nil { + role = string(cudn.Spec.Network.Localnet.Role) + } + topology = string(cudn.Spec.Network.Topology) + if !cudn.DeletionTimestamp.IsZero() { if controllerutil.ContainsFinalizer(cudn, template.FinalizerUserDefinedNetwork) { var errs []error @@ -564,6 +591,7 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine } klog.Infof("Finalizer removed from ClusterUserDefinedNetwork %q", cudn.Name) delete(c.namespaceTracker, cudnName) + metrics.DecrementCUDNCount(role, topology) } return nil, nil @@ -581,6 +609,7 @@ func (c *Controller) syncClusterUDN(cudn *userdefinednetworkv1.ClusterUserDefine return nil, fmt.Errorf("failed to add finalizer to ClusterUserDefinedNetwork %q: %w", cudnName, err) } klog.Infof("Added Finalizer to ClusterUserDefinedNetwork %q", cudnName) + metrics.IncrementCUDNCount(role, topology) } selectedNamespaces, 
err := c.getSelectedNamespaces(cudn.Spec.NamespaceSelector) diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go index 8d11c0960b..0b3aa61194 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template.go @@ -166,6 +166,9 @@ func renderCNINetworkConfig(networkName, nadName string, spec SpecGetter) (map[s netConfSpec.VLANID = int(cfg.VLAN.Access.ID) } } + if netConfSpec.AllowPersistentIPs && !config.OVNKubernetesFeature.EnablePersistentIPs { + return nil, fmt.Errorf("allowPersistentIPs is set but persistentIPs is Disabled") + } if err := util.ValidateNetConf(nadName, netConfSpec); err != nil { return nil, err @@ -192,7 +195,7 @@ func renderCNINetworkConfig(networkName, nadName string, spec SpecGetter) (map[s cniNetConf["mtu"] = mtu } if len(netConfSpec.JoinSubnet) > 0 { - cniNetConf["joinSubnets"] = netConfSpec.JoinSubnet + cniNetConf["joinSubnet"] = netConfSpec.JoinSubnet } if len(netConfSpec.Subnets) > 0 { cniNetConf["subnets"] = netConfSpec.Subnets diff --git a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go index 0c06f3a270..ab0593e210 100644 --- a/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go +++ b/go-controller/pkg/clustermanager/userdefinednetwork/template/net-attach-def-template_test.go @@ -301,6 +301,7 @@ var _ = Describe("NetAttachDefTemplate", func() { // must be defined so the primary user defined network can match the ip families of the underlying cluster config.IPv4Mode = true config.IPv6Mode = true + config.OVNKubernetesFeature.EnablePersistentIPs = true nad, err := RenderNetAttachDefManifest(testUdn, 
testNs) Expect(err).NotTo(HaveOccurred()) Expect(nad.TypeMeta).To(Equal(expectedNAD.TypeMeta)) @@ -326,7 +327,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer3", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/16,2001:dbb::/60", "mtu": 1500 }`, @@ -350,7 +351,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true @@ -376,7 +377,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.62.0.0/24,fd92::/64", + "joinSubnet": "100.62.0.0/24,fd92::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true @@ -436,6 +437,7 @@ var _ = Describe("NetAttachDefTemplate", func() { // must be defined so the primary user defined network can match the ip families of the underlying cluster config.IPv4Mode = true config.IPv6Mode = true + config.OVNKubernetesFeature.EnablePersistentIPs = true nad, err := RenderNetAttachDefManifest(cudn, testNs) Expect(err).NotTo(HaveOccurred()) Expect(nad.TypeMeta).To(Equal(expectedNAD.TypeMeta)) @@ -461,7 +463,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer3", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": "100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/16,2001:dbb::/60", "mtu": 1500 }`, @@ -485,7 +487,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.65.0.0/16,fd99::/64", + "joinSubnet": 
"100.65.0.0/16,fd99::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true @@ -511,7 +513,7 @@ var _ = Describe("NetAttachDefTemplate", func() { "netAttachDefName": "mynamespace/test-net", "role": "primary", "topology": "layer2", - "joinSubnets": "100.62.0.0/24,fd92::/64", + "joinSubnet": "100.62.0.0/24,fd92::/64", "subnets": "192.168.100.0/24,2001:dbb::/64", "mtu": 1500, "allowPersistentIPs": true diff --git a/go-controller/pkg/cni/cni.go b/go-controller/pkg/cni/cni.go index e2cc865265..faf800d52e 100644 --- a/go-controller/pkg/cni/cni.go +++ b/go-controller/pkg/cni/cni.go @@ -15,7 +15,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/udn" diff --git a/go-controller/pkg/cni/cni_test.go b/go-controller/pkg/cni/cni_test.go index ed3f5be1f0..778c83c03c 100644 --- a/go-controller/pkg/cni/cni_test.go +++ b/go-controller/pkg/cni/cni_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/fake" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/cni/cniserver.go b/go-controller/pkg/cni/cniserver.go index 4ec851bbb7..17b888dd63 100644 --- a/go-controller/pkg/cni/cniserver.go +++ b/go-controller/pkg/cni/cniserver.go @@ -16,7 +16,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/cni/cniserver_test.go 
b/go-controller/pkg/cni/cniserver_test.go index 65c9962e2a..6edfbf49e2 100644 --- a/go-controller/pkg/cni/cniserver_test.go +++ b/go-controller/pkg/cni/cniserver_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/client-go/kubernetes/fake" utiltesting "k8s.io/client-go/util/testing" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" diff --git a/go-controller/pkg/cni/types.go b/go-controller/pkg/cni/types.go index f6c5e10727..7a20787d73 100644 --- a/go-controller/pkg/cni/types.go +++ b/go-controller/pkg/cni/types.go @@ -14,7 +14,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" diff --git a/go-controller/pkg/config/cni.go b/go-controller/pkg/config/cni.go index 3d935c5c6a..3bec2d286f 100644 --- a/go-controller/pkg/config/cni.go +++ b/go-controller/pkg/config/cni.go @@ -120,10 +120,6 @@ func parseNetConfSingle(bytes []byte) (*ovncnitypes.NetConf, error) { } func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, error) { - if len(confList.Plugins) > 1 { - return nil, ErrorChainingNotSupported - } - netconf := &ovncnitypes.NetConf{MTU: Default.MTU} if err := json.Unmarshal(confList.Plugins[0].Bytes, netconf); err != nil { return nil, err @@ -134,6 +130,10 @@ func parseNetConfList(confList *libcni.NetworkConfigList) (*ovncnitypes.NetConf, return nil, ErrorAttachDefNotOvnManaged } + if len(confList.Plugins) > 1 { + return nil, ErrorChainingNotSupported + } + netconf.Name = confList.Name netconf.CNIVersion = confList.CNIVersion diff --git a/go-controller/pkg/config/config.go b/go-controller/pkg/config/config.go index c7df666cbc..89757b864b 100644 
--- a/go-controller/pkg/config/config.go +++ b/go-controller/pkg/config/config.go @@ -38,6 +38,9 @@ const DefaultVXLANPort = 4789 const DefaultDBTxnTimeout = time.Second * 100 +// DefaultEphemeralPortRange is used for unit testing only +const DefaultEphemeralPortRange = "32768-60999" + // The following are global config parameters that other modules may access directly var ( // Build information. Populated at build-time. @@ -420,19 +423,17 @@ type OVNKubernetesFeatureConfig struct { EgressIPNodeHealthCheckPort int `gcfg:"egressip-node-healthcheck-port"` EnableMultiNetwork bool `gcfg:"enable-multi-network"` EnableNetworkSegmentation bool `gcfg:"enable-network-segmentation"` + EnablePreconfiguredUDNAddresses bool `gcfg:"enable-preconfigured-udn-addresses"` EnableRouteAdvertisements bool `gcfg:"enable-route-advertisements"` - // This feature requires a kernel fix https://github.com/torvalds/linux/commit/7f3287db654395f9c5ddd246325ff7889f550286 - // to work on a kind cluster. Flag allows to disable it for current CI, will be turned on when github runners have this fix. 
- DisableUDNHostIsolation bool `gcfg:"disable-udn-host-isolation"` - EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"` - EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"` - EnableInterconnect bool `gcfg:"enable-interconnect"` - EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"` - EnablePersistentIPs bool `gcfg:"enable-persistent-ips"` - EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` - EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` - EnableObservability bool `gcfg:"enable-observability"` - EnableNetworkQoS bool `gcfg:"enable-network-qos"` + EnableMultiNetworkPolicy bool `gcfg:"enable-multi-networkpolicy"` + EnableStatelessNetPol bool `gcfg:"enable-stateless-netpol"` + EnableInterconnect bool `gcfg:"enable-interconnect"` + EnableMultiExternalGateway bool `gcfg:"enable-multi-external-gateway"` + EnablePersistentIPs bool `gcfg:"enable-persistent-ips"` + EnableDNSNameResolver bool `gcfg:"enable-dns-name-resolver"` + EnableServiceTemplateSupport bool `gcfg:"enable-svc-template-support"` + EnableObservability bool `gcfg:"enable-observability"` + EnableNetworkQoS bool `gcfg:"enable-network-qos"` } // GatewayMode holds the node gateway mode @@ -494,6 +495,10 @@ type GatewayConfig struct { DisableForwarding bool `gcfg:"disable-forwarding"` // AllowNoUplink (disabled by default) controls if the external gateway bridge without an uplink port is allowed in local gateway mode. AllowNoUplink bool `gcfg:"allow-no-uplink"` + // EphemeralPortRange is the range of ports used by egress SNAT operations in OVN. Specifically for NAT where + // the source IP of the NAT will be a shared Node IP address. If unset, the value will be determined by sysctl lookup + // for the kernel's ephemeral range: net.ipv4.ip_local_port_range. Format is "<min>-<max>".
+ EphemeralPortRange string `gcfg:"ephemeral-port-range"` } // OvnAuthConfig holds client authentication and location details for @@ -664,6 +669,9 @@ func PrepareTestConfig() error { Kubernetes.DisableRequestedChassis = false EnableMulticast = false Default.OVSDBTxnTimeout = 5 * time.Second + if Gateway.Mode != GatewayModeDisabled { + Gateway.EphemeralPortRange = DefaultEphemeralPortRange + } if err := completeConfig(); err != nil { return err @@ -1076,18 +1084,18 @@ var OVNK8sFeatureFlags = []cli.Flag{ Destination: &cliConfig.OVNKubernetesFeature.EnableMultiNetworkPolicy, Value: OVNKubernetesFeature.EnableMultiNetworkPolicy, }, - &cli.BoolFlag{ - Name: "disable-udn-host-isolation", - Usage: "Configure to disable UDN host isolation with ovn-kubernetes.", - Destination: &cliConfig.OVNKubernetesFeature.DisableUDNHostIsolation, - Value: OVNKubernetesFeature.DisableUDNHostIsolation, - }, &cli.BoolFlag{ Name: "enable-network-segmentation", Usage: "Configure to use network segmentation feature with ovn-kubernetes.", Destination: &cliConfig.OVNKubernetesFeature.EnableNetworkSegmentation, Value: OVNKubernetesFeature.EnableNetworkSegmentation, }, + &cli.BoolFlag{ + Name: "enable-preconfigured-udn-addresses", + Usage: "Enable workloads to connect to user-defined network with preconfigured addresses.", + Destination: &cliConfig.OVNKubernetesFeature.EnablePreconfiguredUDNAddresses, + Value: OVNKubernetesFeature.EnablePreconfiguredUDNAddresses, + }, &cli.BoolFlag{ Name: "enable-route-advertisements", Usage: "Configure to use route advertisements feature with ovn-kubernetes.", @@ -1509,6 +1517,14 @@ var OVNGatewayFlags = []cli.Flag{ Usage: "Allow the external gateway bridge without an uplink port in local gateway mode", Destination: &cliConfig.Gateway.AllowNoUplink, }, + &cli.StringFlag{ + Name: "ephemeral-port-range", + Usage: "The port range in '<min>-<max>' format for OVN to use when SNAT'ing to a node IP. 
" + "This range should not collide with the node port range being used in Kubernetes. If not provided, " + "the default value will be derived from checking the sysctl value of net.ipv4.ip_local_port_range on the node.", + Destination: &cliConfig.Gateway.EphemeralPortRange, + Value: Gateway.EphemeralPortRange, + }, // Deprecated CLI options &cli.BoolFlag{ Name: "init-gateways", @@ -1917,6 +1933,19 @@ func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { if !found { return fmt.Errorf("invalid gateway mode %q: expect one of %s", string(Gateway.Mode), strings.Join(validModes, ",")) } + + if len(Gateway.EphemeralPortRange) > 0 { + if !isValidEphemeralPortRange(Gateway.EphemeralPortRange) { + return fmt.Errorf("invalid ephemeral-port-range, should be in the format <min>-<max>") + } + } else { + // auto-detect ephemeral range + portRange, err := getKernelEphemeralPortRange() + if err != nil { + return fmt.Errorf("unable to auto-detect ephemeral port range to use with OVN") + } + Gateway.EphemeralPortRange = portRange + } } // Options are only valid if Mode is not disabled @@ -1927,6 +1956,9 @@ func buildGatewayConfig(ctx *cli.Context, cli, file *config) error { if Gateway.NextHop != "" { return fmt.Errorf("gateway next-hop option %q not allowed when gateway is disabled", Gateway.NextHop) } + if len(Gateway.EphemeralPortRange) > 0 { + return fmt.Errorf("gateway ephemeral port range option not allowed when gateway is disabled") + } } if Gateway.Mode != GatewayModeShared && Gateway.VLANID != 0 { diff --git a/go-controller/pkg/config/config_test.go b/go-controller/pkg/config/config_test.go index ddfaf84e65..39e7fa41bc 100644 --- a/go-controller/pkg/config/config_test.go +++ b/go-controller/pkg/config/config_test.go @@ -227,6 +227,7 @@ egressip-node-healthcheck-port=1234 enable-multi-network=false enable-multi-networkpolicy=false enable-network-segmentation=false +enable-preconfigured-udn-addresses=false enable-route-advertisements=false enable-interconnect=false 
enable-multi-external-gateway=false @@ -338,6 +339,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(0)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeFalse()) + gomega.Expect(OVNKubernetesFeature.EnablePreconfiguredUDNAddresses).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableMultiNetworkPolicy).To(gomega.BeFalse()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeFalse()) @@ -597,6 +599,7 @@ var _ = Describe("Config Operations", func() { "enable-multi-network=true", "enable-multi-networkpolicy=true", "enable-network-segmentation=true", + "enable-preconfigured-udn-addresses=true", "enable-route-advertisements=true", "enable-interconnect=true", "enable-multi-external-gateway=true", @@ -687,6 +690,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(1234)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeTrue()) + gomega.Expect(OVNKubernetesFeature.EnablePreconfiguredUDNAddresses).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiExternalGateway).To(gomega.BeTrue()) @@ -794,6 +798,7 @@ var _ = Describe("Config Operations", func() { gomega.Expect(OVNKubernetesFeature.EgressIPNodeHealthCheckPort).To(gomega.Equal(4321)) gomega.Expect(OVNKubernetesFeature.EnableMultiNetwork).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableNetworkSegmentation).To(gomega.BeTrue()) + 
gomega.Expect(OVNKubernetesFeature.EnablePreconfiguredUDNAddresses).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableRouteAdvertisements).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableMultiNetworkPolicy).To(gomega.BeTrue()) gomega.Expect(OVNKubernetesFeature.EnableInterconnect).To(gomega.BeTrue()) @@ -869,6 +874,7 @@ var _ = Describe("Config Operations", func() { "-enable-multi-network=true", "-enable-multi-networkpolicy=true", "-enable-network-segmentation=true", + "-enable-preconfigured-udn-addresses=true", "-enable-route-advertisements=true", "-enable-interconnect=true", "-enable-multi-external-gateway=true", diff --git a/go-controller/pkg/config/utils.go b/go-controller/pkg/config/utils.go index 7ff8eff484..f0f0ff1a6b 100644 --- a/go-controller/pkg/config/utils.go +++ b/go-controller/pkg/config/utils.go @@ -3,7 +3,9 @@ package config import ( "fmt" "net" + "os" "reflect" + "regexp" "strconv" "strings" @@ -328,3 +330,49 @@ func AllocateV6MasqueradeIPs(masqueradeSubnetNetworkAddress net.IP, masqueradeIP } return nil } + +func isValidEphemeralPortRange(s string) bool { + // Regex to match "-" with no extra characters + re := regexp.MustCompile(`^(\d{1,5})-(\d{1,5})$`) + matches := re.FindStringSubmatch(s) + if matches == nil { + return false + } + + minPort, err1 := strconv.Atoi(matches[1]) + maxPort, err2 := strconv.Atoi(matches[2]) + if err1 != nil || err2 != nil { + return false + } + + // Port numbers must be in the 1-65535 range + if minPort < 1 || minPort > 65535 || maxPort < 0 || maxPort > 65535 { + return false + } + + return maxPort > minPort +} + +func getKernelEphemeralPortRange() (string, error) { + data, err := os.ReadFile("/proc/sys/net/ipv4/ip_local_port_range") + if err != nil { + return "", fmt.Errorf("failed to read port range: %w", err) + } + + parts := strings.Fields(string(data)) + if len(parts) != 2 { + return "", fmt.Errorf("unexpected format: %q", string(data)) + } + + minPort, err := strconv.Atoi(parts[0]) + if 
err != nil { + return "", fmt.Errorf("invalid min port: %w", err) + } + + maxPort, err := strconv.Atoi(parts[1]) + if err != nil { + return "", fmt.Errorf("invalid max port: %w", err) + } + + return fmt.Sprintf("%d-%d", minPort, maxPort), nil +} diff --git a/go-controller/pkg/controllermanager/controller_manager.go b/go-controller/pkg/controllermanager/controller_manager.go index 06d88c4ce4..27db274d05 100644 --- a/go-controller/pkg/controllermanager/controller_manager.go +++ b/go-controller/pkg/controllermanager/controller_manager.go @@ -14,7 +14,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -23,6 +23,7 @@ import ( libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" @@ -438,7 +439,7 @@ func (cm *ControllerManager) Start(ctx context.Context) error { // with k=10, // for a cluster with 10 nodes, measurement of 1 in every 100 requests // for a cluster with 100 nodes, measurement of 1 in every 1000 requests - metrics.GetConfigDurationRecorder().Run(cm.nbClient, cm.kube, 10, time.Second*5, cm.stopChan) + recorders.GetConfigDurationRecorder().Run(cm.nbClient, cm.watchFactory, 10, time.Second*5, cm.stopChan) } cm.podRecorder.Run(cm.sbClient, cm.stopChan) diff --git a/go-controller/pkg/controllermanager/node_controller_manager.go 
b/go-controller/pkg/controllermanager/node_controller_manager.go index d3af2b3b40..aca81d30f7 100644 --- a/go-controller/pkg/controllermanager/node_controller_manager.go +++ b/go-controller/pkg/controllermanager/node_controller_manager.go @@ -13,7 +13,7 @@ import ( "k8s.io/klog/v2" kexec "k8s.io/utils/exec" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/controllermanager/node_controller_manager_test.go b/go-controller/pkg/controllermanager/node_controller_manager_test.go index cf96448fe9..92d51a25d8 100644 --- a/go-controller/pkg/controllermanager/node_controller_manager_test.go +++ b/go-controller/pkg/controllermanager/node_controller_manager_test.go @@ -228,7 +228,7 @@ var _ = Describe("Healthcheck tests", func() { }, } nodeList := []*corev1.Node{node} - factoryMock.On("GetNode", nodeName).Return(nodeList[0], nil) + factoryMock.On("GetNodeForWindows", nodeName).Return(nodeList[0], nil) factoryMock.On("GetNodes").Return(nodeList, nil) factoryMock.On("UserDefinedNetworkInformer").Return(nil) factoryMock.On("ClusterUserDefinedNetworkInformer").Return(nil) diff --git a/go-controller/pkg/factory/handler.go b/go-controller/pkg/factory/handler.go index 1e87f7309b..50563b3278 100644 --- a/go-controller/pkg/factory/handler.go +++ b/go-controller/pkg/factory/handler.go @@ -76,6 +76,10 @@ func (h *Handler) OnDelete(obj interface{}) { } } +func (h *Handler) FilterFunc(obj interface{}) bool { + return h.base.FilterFunc(obj) +} + func (h *Handler) kill() bool { return atomic.CompareAndSwapUint32(&h.tombstone, handlerAlive, handlerDead) } diff --git a/go-controller/pkg/kube/annotator_test.go b/go-controller/pkg/kube/annotator_test.go index 0caa0956a0..4c66adb81c 100644 --- a/go-controller/pkg/kube/annotator_test.go +++ b/go-controller/pkg/kube/annotator_test.go @@ -79,7 +79,7 @@ var _ = 
Describe("Annotator", func() { err := nodeAnnot.Run() Expect(err).ToNot(HaveOccurred()) - node, err := kube.GetNode(nodeName) + node, err := kube.GetNodeForWindows(nodeName) Expect(err).ToNot(HaveOccurred()) // should contain initial annotations diff --git a/go-controller/pkg/kube/kube.go b/go-controller/pkg/kube/kube.go index 4171e398e2..7eccec3d7f 100644 --- a/go-controller/pkg/kube/kube.go +++ b/go-controller/pkg/kube/kube.go @@ -12,7 +12,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -62,12 +61,11 @@ type Interface interface { PatchNode(old, new *corev1.Node) error UpdateNodeStatus(node *corev1.Node) error UpdatePodStatus(pod *corev1.Pod) error - GetAnnotationsOnPod(namespace, name string) (map[string]string, error) - GetNodes() ([]*corev1.Node, error) - GetNamespaces(labelSelector metav1.LabelSelector) ([]*corev1.Namespace, error) - GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) - GetPod(namespace, name string) (*corev1.Pod, error) - GetNode(name string) (*corev1.Node, error) + // GetPodsForDBChecker should only be used by legacy DB checker. Use watchFactory instead to get pods. + GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) + // GetNodeForWindows should only be used for windows hybrid overlay binary and never in linux code + GetNodeForWindows(name string) (*corev1.Node, error) + GetNodesForWindows() ([]*corev1.Node, error) Events() kv1core.EventInterface } @@ -201,7 +199,7 @@ func (k *Kube) SetAnnotationsOnService(namespace, name string, annotations map[s // SetTaintOnNode tries to add a new taint to the node. If the taint already exists, it doesn't do anything. 
func (k *Kube) SetTaintOnNode(nodeName string, taint *corev1.Taint) error { - node, err := k.GetNode(nodeName) + node, err := k.GetNodeForWindows(nodeName) if err != nil { klog.Errorf("Unable to retrieve node %s for tainting %s: %v", nodeName, taint.ToString(), err) return err @@ -234,7 +232,7 @@ func (k *Kube) SetTaintOnNode(nodeName string, taint *corev1.Taint) error { // RemoveTaintFromNode removes all the taints that have the same key and effect from the node. // If the taint doesn't exist, it doesn't do anything. func (k *Kube) RemoveTaintFromNode(nodeName string, taint *corev1.Taint) error { - node, err := k.GetNode(nodeName) + node, err := k.GetNodeForWindows(nodeName) if err != nil { klog.Errorf("Unable to retrieve node %s for tainting %s: %v", nodeName, taint.ToString(), err) return err @@ -324,32 +322,8 @@ func (k *Kube) UpdatePodStatus(pod *corev1.Pod) error { return err } -// GetAnnotationsOnPod obtains the pod annotations from kubernetes apiserver, given the name and namespace -func (k *Kube) GetAnnotationsOnPod(namespace, name string) (map[string]string, error) { - pod, err := k.KClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return pod.ObjectMeta.Annotations, nil -} - -// GetNamespaces returns the list of all Namespace objects matching the labelSelector -func (k *Kube) GetNamespaces(labelSelector metav1.LabelSelector) ([]*corev1.Namespace, error) { - list := []*corev1.Namespace{} - err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { - return k.KClient.CoreV1().Namespaces().List(ctx, opts) - }).EachListItem(context.TODO(), metav1.ListOptions{ - LabelSelector: labels.Set(labelSelector.MatchLabels).String(), - ResourceVersion: "0", - }, func(obj runtime.Object) error { - list = append(list, obj.(*corev1.Namespace)) - return nil - }) - return list, err -} - -// GetPods returns the list of all Pod objects in a namespace matching the options 
-func (k *Kube) GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { +// GetPodsForDBChecker returns the list of all Pod objects in a namespace matching the options. Only used by the legacy db checker. +func (k *Kube) GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { list := []*corev1.Pod{} opts.ResourceVersion = "0" err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { @@ -361,13 +335,8 @@ func (k *Kube) GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod return list, err } -// GetPod obtains the pod from kubernetes apiserver, given the name and namespace -func (k *Kube) GetPod(namespace, name string) (*corev1.Pod, error) { - return k.KClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) -} - -// GetNodes returns the list of all Node objects from kubernetes -func (k *Kube) GetNodes() ([]*corev1.Node, error) { +// GetNodesForWindows returns the list of all Node objects from kubernetes. Only used by windows binary. +func (k *Kube) GetNodesForWindows() ([]*corev1.Node, error) { list := []*corev1.Node{} err := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { return k.KClient.CoreV1().Nodes().List(ctx, opts) @@ -380,8 +349,8 @@ func (k *Kube) GetNodes() ([]*corev1.Node, error) { return list, err } -// GetNode returns the Node resource from kubernetes apiserver, given its name -func (k *Kube) GetNode(name string) (*corev1.Node, error) { +// GetNodeForWindows returns the Node resource from kubernetes apiserver, given its name. Only used by windows binary. 
+func (k *Kube) GetNodeForWindows(name string) (*corev1.Node, error) { return k.KClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) } diff --git a/go-controller/pkg/kube/kube_test.go b/go-controller/pkg/kube/kube_test.go index 4741cef0ed..d93119ff3a 100644 --- a/go-controller/pkg/kube/kube_test.go +++ b/go-controller/pkg/kube/kube_test.go @@ -96,7 +96,7 @@ var _ = Describe("Kube", func() { err := kube.SetTaintOnNode(node.Name, &taint) Expect(err).ToNot(HaveOccurred()) - updatedNode, err := kube.GetNode(node.Name) + updatedNode, err := kube.GetNodeForWindows(node.Name) Expect(err).ToNot(HaveOccurred()) Expect(updatedNode.Spec.Taints).To(Equal([]corev1.Taint{taint})) }) diff --git a/go-controller/pkg/kube/mocks/Interface.go b/go-controller/pkg/kube/mocks/Interface.go index 6631ca44c0..594d33d699 100644 --- a/go-controller/pkg/kube/mocks/Interface.go +++ b/go-controller/pkg/kube/mocks/Interface.go @@ -37,72 +37,12 @@ func (_m *Interface) Events() v1.EventInterface { return r0 } -// GetAnnotationsOnPod provides a mock function with given fields: namespace, name -func (_m *Interface) GetAnnotationsOnPod(namespace string, name string) (map[string]string, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetAnnotationsOnPod") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (map[string]string, error)); ok { - return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) map[string]string); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNamespaces provides a mock function with given fields: labelSelector -func (_m *Interface) GetNamespaces(labelSelector metav1.LabelSelector) ([]*corev1.Namespace, error) { - ret := 
_m.Called(labelSelector) - - if len(ret) == 0 { - panic("no return value specified for GetNamespaces") - } - - var r0 []*corev1.Namespace - var r1 error - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) ([]*corev1.Namespace, error)); ok { - return rf(labelSelector) - } - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) []*corev1.Namespace); ok { - r0 = rf(labelSelector) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*corev1.Namespace) - } - } - - if rf, ok := ret.Get(1).(func(metav1.LabelSelector) error); ok { - r1 = rf(labelSelector) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetNode provides a mock function with given fields: name -func (_m *Interface) GetNode(name string) (*corev1.Node, error) { +func (_m *Interface) GetNodeForWindows(name string) (*corev1.Node, error) { ret := _m.Called(name) if len(ret) == 0 { - panic("no return value specified for GetNode") + panic("no return value specified for GetNodeForWindows") } var r0 *corev1.Node @@ -127,12 +67,12 @@ func (_m *Interface) GetNode(name string) (*corev1.Node, error) { return r0, r1 } -// GetNodes provides a mock function with given fields: -func (_m *Interface) GetNodes() ([]*corev1.Node, error) { +// GetNodesForWindows provides a mock function with given fields: +func (_m *Interface) GetNodesForWindows() ([]*corev1.Node, error) { ret := _m.Called() if len(ret) == 0 { - panic("no return value specified for GetNodes") + panic("no return value specified for GetNodesForWindows") } var r0 []*corev1.Node @@ -157,42 +97,12 @@ func (_m *Interface) GetNodes() ([]*corev1.Node, error) { return r0, r1 } -// GetPod provides a mock function with given fields: namespace, name -func (_m *Interface) GetPod(namespace string, name string) (*corev1.Pod, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetPod") - } - - var r0 *corev1.Pod - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (*corev1.Pod, error)); ok { - 
return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) *corev1.Pod); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*corev1.Pod) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetPods provides a mock function with given fields: namespace, opts -func (_m *Interface) GetPods(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { +func (_m *Interface) GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*corev1.Pod, error) { ret := _m.Called(namespace, opts) if len(ret) == 0 { - panic("no return value specified for GetPods") + panic("no return value specified for GetPodsForDBChecker") } var r0 []*corev1.Pod diff --git a/go-controller/pkg/kube/mocks/InterfaceOVN.go b/go-controller/pkg/kube/mocks/InterfaceOVN.go index 14a8a33af6..18e93ed800 100644 --- a/go-controller/pkg/kube/mocks/InterfaceOVN.go +++ b/go-controller/pkg/kube/mocks/InterfaceOVN.go @@ -91,36 +91,6 @@ func (_m *InterfaceOVN) Events() corev1.EventInterface { return r0 } -// GetAnnotationsOnPod provides a mock function with given fields: namespace, name -func (_m *InterfaceOVN) GetAnnotationsOnPod(namespace string, name string) (map[string]string, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetAnnotationsOnPod") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (map[string]string, error)); ok { - return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) map[string]string); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetEgressFirewalls provides a mock function with 
given fields: func (_m *InterfaceOVN) GetEgressFirewalls() ([]*egressfirewallv1.EgressFirewall, error) { ret := _m.Called() @@ -211,42 +181,12 @@ func (_m *InterfaceOVN) GetEgressIPs() ([]*egressipv1.EgressIP, error) { return r0, r1 } -// GetNamespaces provides a mock function with given fields: labelSelector -func (_m *InterfaceOVN) GetNamespaces(labelSelector metav1.LabelSelector) ([]*apicorev1.Namespace, error) { - ret := _m.Called(labelSelector) - - if len(ret) == 0 { - panic("no return value specified for GetNamespaces") - } - - var r0 []*apicorev1.Namespace - var r1 error - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) ([]*apicorev1.Namespace, error)); ok { - return rf(labelSelector) - } - if rf, ok := ret.Get(0).(func(metav1.LabelSelector) []*apicorev1.Namespace); ok { - r0 = rf(labelSelector) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*apicorev1.Namespace) - } - } - - if rf, ok := ret.Get(1).(func(metav1.LabelSelector) error); ok { - r1 = rf(labelSelector) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetNode provides a mock function with given fields: name -func (_m *InterfaceOVN) GetNode(name string) (*apicorev1.Node, error) { +func (_m *InterfaceOVN) GetNodeForWindows(name string) (*apicorev1.Node, error) { ret := _m.Called(name) if len(ret) == 0 { - panic("no return value specified for GetNode") + panic("no return value specified for GetNodeForWindows") } var r0 *apicorev1.Node @@ -271,8 +211,8 @@ func (_m *InterfaceOVN) GetNode(name string) (*apicorev1.Node, error) { return r0, r1 } -// GetNodes provides a mock function with given fields: -func (_m *InterfaceOVN) GetNodes() ([]*apicorev1.Node, error) { +// GetNodesForWindows provides a mock function with given fields: +func (_m *InterfaceOVN) GetNodesForWindows() ([]*apicorev1.Node, error) { ret := _m.Called() if len(ret) == 0 { @@ -301,42 +241,12 @@ func (_m *InterfaceOVN) GetNodes() ([]*apicorev1.Node, error) { return r0, r1 } -// GetPod provides a mock function 
with given fields: namespace, name -func (_m *InterfaceOVN) GetPod(namespace string, name string) (*apicorev1.Pod, error) { - ret := _m.Called(namespace, name) - - if len(ret) == 0 { - panic("no return value specified for GetPod") - } - - var r0 *apicorev1.Pod - var r1 error - if rf, ok := ret.Get(0).(func(string, string) (*apicorev1.Pod, error)); ok { - return rf(namespace, name) - } - if rf, ok := ret.Get(0).(func(string, string) *apicorev1.Pod); ok { - r0 = rf(namespace, name) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*apicorev1.Pod) - } - } - - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(namespace, name) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetPods provides a mock function with given fields: namespace, opts -func (_m *InterfaceOVN) GetPods(namespace string, opts metav1.ListOptions) ([]*apicorev1.Pod, error) { +func (_m *InterfaceOVN) GetPodsForDBChecker(namespace string, opts metav1.ListOptions) ([]*apicorev1.Pod, error) { ret := _m.Called(namespace, opts) if len(ret) == 0 { - panic("no return value specified for GetPods") + panic("no return value specified for GetPodsForDBChecker") } var r0 []*apicorev1.Pod diff --git a/go-controller/pkg/kubevirt/dhcp.go b/go-controller/pkg/kubevirt/dhcp.go index 5e8534bd71..51cb600ebd 100644 --- a/go-controller/pkg/kubevirt/dhcp.go +++ b/go-controller/pkg/kubevirt/dhcp.go @@ -9,7 +9,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/kubevirt/pod.go b/go-controller/pkg/kubevirt/pod.go index 48cd5ed1c2..901d28ca74 100644 --- a/go-controller/pkg/kubevirt/pod.go +++ b/go-controller/pkg/kubevirt/pod.go @@ -13,7 +13,7 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" 
"k8s.io/client-go/util/retry" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" @@ -470,11 +470,15 @@ func DiscoverLiveMigrationStatus(client *factory.WatchFactory, pod *corev1.Pod) targetPod := vmPods[len(vmPods)-1] livingPods := filterNotComplete(vmPods) + + // If there is no living pod we should state no live migration status + if len(livingPods) == 0 { + return nil, nil + } + + // There is a living pod but is not the target one so the migration + // has failed. if util.PodCompleted(targetPod) { - // if target pod failed, then there should be only one living source pod. - if len(livingPods) != 1 { - return nil, fmt.Errorf("unexpected live migration state: should have a single living pod") - } return &LiveMigrationStatus{ SourcePod: livingPods[0], TargetPod: targetPod, diff --git a/go-controller/pkg/kubevirt/pod_test.go b/go-controller/pkg/kubevirt/pod_test.go index 8db076019b..2bab9282f8 100644 --- a/go-controller/pkg/kubevirt/pod_test.go +++ b/go-controller/pkg/kubevirt/pod_test.go @@ -98,6 +98,11 @@ var _ = Describe("Kubevirt Pod", func() { pods: []corev1.Pod{successfullyMigratedKvSourcePod, failedMigrationKvTargetPod, successfulMigrationKvTargetPod}, }, ), + Entry("returns nil when there is all the pods are completed (not running vm after migration)", + testParams{ + pods: []corev1.Pod{completedKubevirtPod(t0), completedKubevirtPod(t1), completedKubevirtPod(t3)}, + }, + ), Entry("returns Migration in progress status when 2 pods are running, target pod is not yet ready", testParams{ pods: []corev1.Pod{runningKvSourcePod, duringMigrationKvTargetPod}, @@ -148,12 +153,6 @@ var _ = Describe("Kubevirt Pod", func() { }, }, ), - Entry("returns err when kubevirt VM has several living pods and target pod failed", - testParams{ - pods: []corev1.Pod{runningKvSourcePod, 
successfulMigrationKvTargetPod, anotherFailedMigrationKvTargetPod}, - expectedError: fmt.Errorf("unexpected live migration state: should have a single living pod"), - }, - ), Entry("returns err when kubevirt VM has several living pods", testParams{ pods: []corev1.Pod{runningKvSourcePod, duringMigrationKvTargetPod, yetAnotherDuringMigrationKvTargetPod}, diff --git a/go-controller/pkg/kubevirt/router.go b/go-controller/pkg/kubevirt/router.go index f6354d50f9..ed4a5dfab2 100644 --- a/go-controller/pkg/kubevirt/router.go +++ b/go-controller/pkg/kubevirt/router.go @@ -8,7 +8,7 @@ import ( corev1 "k8s.io/api/core/v1" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -95,7 +95,16 @@ func EnsureLocalZonePodAddressesToNodeRoute(watchFactory *factory.WatchFactory, if config.OVNKubernetesFeature.EnableInterconnect { // NOTE: EIP & ESVC use same route and if this is already present thanks to those features, // this will be a no-op - if err := libovsdbutil.CreateDefaultRouteToExternal(nbClient, types.OVNClusterRouter, types.GWRouterPrefix+pod.Spec.NodeName, clusterSubnets); err != nil { + node, err := watchFactory.GetNode(pod.Spec.NodeName) + if err != nil { + return fmt.Errorf("failed getting to list node %q for pod %s/%s: %w", pod.Spec.NodeName, pod.Namespace, pod.Name, err) + } + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network gateway router join IPs for node %q: %w", node.Name, err) + } + if err := libovsdbutil.CreateDefaultRouteToExternal(nbClient, types.OVNClusterRouter, + types.GWRouterPrefix+pod.Spec.NodeName, clusterSubnets, gatewayIPs); err != nil { return err } } diff --git a/go-controller/pkg/libovsdb/libovsdb.go 
b/go-controller/pkg/libovsdb/libovsdb.go index 40bd1298fe..860ec26698 100644 --- a/go-controller/pkg/libovsdb/libovsdb.go +++ b/go-controller/pkg/libovsdb/libovsdb.go @@ -23,8 +23,8 @@ import ( "k8s.io/klog/v2" "k8s.io/klog/v2/textlogger" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/acl.go b/go-controller/pkg/libovsdb/ops/acl.go index cd671595b3..f9987fbeb7 100644 --- a/go-controller/pkg/libovsdb/ops/acl.go +++ b/go-controller/pkg/libovsdb/ops/acl.go @@ -4,8 +4,8 @@ import ( "context" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/address_set.go b/go-controller/pkg/libovsdb/ops/address_set.go index c6a8ce16e7..90d251bbb5 100644 --- a/go-controller/pkg/libovsdb/ops/address_set.go +++ b/go-controller/pkg/libovsdb/ops/address_set.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -18,7 +18,7 @@ type addressSetPredicate func(*nbdb.AddressSet) bool // The purpose is to prevent libovsdb interpreting non-nil empty maps/slices // as default and thus being filtered out of the update. 
The intention is to // use non-nil empty maps/slices to clear them out in the update. -// See: https://github.com/ovn-org/libovsdb/issues/226 +// See: https://github.com/ovn-kubernetes/libovsdb/issues/226 func getNonZeroAddressSetMutableFields(as *nbdb.AddressSet) []interface{} { fields := []interface{}{} if as.Addresses != nil { diff --git a/go-controller/pkg/libovsdb/ops/chassis.go b/go-controller/pkg/libovsdb/ops/chassis.go index 0196da3463..83a2d6a3c2 100644 --- a/go-controller/pkg/libovsdb/ops/chassis.go +++ b/go-controller/pkg/libovsdb/ops/chassis.go @@ -5,7 +5,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/libovsdb/ops/copp.go b/go-controller/pkg/libovsdb/ops/copp.go index dac95c5c0e..a0f8697b1b 100644 --- a/go-controller/pkg/libovsdb/ops/copp.go +++ b/go-controller/pkg/libovsdb/ops/copp.go @@ -1,8 +1,8 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/dhcp.go b/go-controller/pkg/libovsdb/ops/dhcp.go index 94ab12800a..cb03fde11c 100644 --- a/go-controller/pkg/libovsdb/ops/dhcp.go +++ b/go-controller/pkg/libovsdb/ops/dhcp.go @@ -1,8 +1,8 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/lbgroup.go b/go-controller/pkg/libovsdb/ops/lbgroup.go index 
8ab75c4d15..829cce5003 100644 --- a/go-controller/pkg/libovsdb/ops/lbgroup.go +++ b/go-controller/pkg/libovsdb/ops/lbgroup.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/loadbalancer.go b/go-controller/pkg/libovsdb/ops/loadbalancer.go index 221e980b0b..b984c99fce 100644 --- a/go-controller/pkg/libovsdb/ops/loadbalancer.go +++ b/go-controller/pkg/libovsdb/ops/loadbalancer.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -16,7 +16,7 @@ import ( // The purpose is to prevent libovsdb interpreting non-nil empty maps/slices // as default and thus being filtered out of the update. The intention is to // use non-nil empty maps/slices to clear them out in the update. 
-// See: https://github.com/ovn-org/libovsdb/issues/226 +// See: https://github.com/ovn-kubernetes/libovsdb/issues/226 func getNonZeroLoadBalancerMutableFields(lb *nbdb.LoadBalancer) []interface{} { fields := []interface{}{} if lb.Name != "" { diff --git a/go-controller/pkg/libovsdb/ops/mac_binding.go b/go-controller/pkg/libovsdb/ops/mac_binding.go index 1f7a76ba8b..0e1fe6718d 100644 --- a/go-controller/pkg/libovsdb/ops/mac_binding.go +++ b/go-controller/pkg/libovsdb/ops/mac_binding.go @@ -1,7 +1,7 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/meter.go b/go-controller/pkg/libovsdb/ops/meter.go index d08b27ecc1..666da32fa8 100644 --- a/go-controller/pkg/libovsdb/ops/meter.go +++ b/go-controller/pkg/libovsdb/ops/meter.go @@ -5,8 +5,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/model.go b/go-controller/pkg/libovsdb/ops/model.go index 76b525fa48..f266a16a64 100644 --- a/go-controller/pkg/libovsdb/ops/model.go +++ b/go-controller/pkg/libovsdb/ops/model.go @@ -4,9 +4,9 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/libovsdb/ops/model_client.go b/go-controller/pkg/libovsdb/ops/model_client.go index 
0668c20399..bf72f086d9 100644 --- a/go-controller/pkg/libovsdb/ops/model_client.go +++ b/go-controller/pkg/libovsdb/ops/model_client.go @@ -8,9 +8,9 @@ import ( "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" diff --git a/go-controller/pkg/libovsdb/ops/model_client_test.go b/go-controller/pkg/libovsdb/ops/model_client_test.go index 0ab218c631..52471d6408 100644 --- a/go-controller/pkg/libovsdb/ops/model_client_test.go +++ b/go-controller/pkg/libovsdb/ops/model_client_test.go @@ -8,9 +8,9 @@ import ( "github.com/onsi/gomega/types" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" diff --git a/go-controller/pkg/libovsdb/ops/nb_global.go b/go-controller/pkg/libovsdb/ops/nb_global.go index 88d962af0d..dc03be511e 100644 --- a/go-controller/pkg/libovsdb/ops/nb_global.go +++ b/go-controller/pkg/libovsdb/ops/nb_global.go @@ -1,7 +1,7 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/options.go b/go-controller/pkg/libovsdb/ops/options.go new file mode 100644 index 0000000000..d960062e92 --- /dev/null +++ b/go-controller/pkg/libovsdb/ops/options.go @@ -0,0 +1,18 @@ +package ops + +// This is a list of options 
used for OVN operations. +// Started with adding only some of them, feel free to continue extending this list. +// Eventually we expect to have no string options in the code. +const ( + // RequestedTnlKey can be used by LogicalSwitch, LogicalSwitchPort, LogicalRouter and LogicalRouterPort + // for distributed switches/routers + RequestedTnlKey = "requested-tnl-key" + // RequestedChassis can be used by LogicalSwitchPort and LogicalRouterPort. + // It specifies the chassis (by name or hostname) that is allowed to bind this port. + RequestedChassis = "requested-chassis" + // RouterPort can be used by LogicalSwitchPort to specify a connection to a logical router. + RouterPort = "router-port" + // GatewayMTU can be used by LogicalRouterPort to specify the MTU for the gateway port. + // If set, logical flows will be added to router pipeline to check packet length. + GatewayMTU = "gateway_mtu" +) diff --git a/go-controller/pkg/libovsdb/ops/ovs/bridge.go b/go-controller/pkg/libovsdb/ops/ovs/bridge.go index d109f2b1b6..aa2deeb673 100644 --- a/go-controller/pkg/libovsdb/ops/ovs/bridge.go +++ b/go-controller/pkg/libovsdb/ops/ovs/bridge.go @@ -3,7 +3,7 @@ package ovs import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" diff --git a/go-controller/pkg/libovsdb/ops/ovs/interface.go b/go-controller/pkg/libovsdb/ops/ovs/interface.go index 797e7421ca..41259ef5a4 100644 --- a/go-controller/pkg/libovsdb/ops/ovs/interface.go +++ b/go-controller/pkg/libovsdb/ops/ovs/interface.go @@ -3,7 +3,7 @@ package ovs import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" diff --git 
a/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go b/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go index c6dd53a89f..a19df4e3fa 100644 --- a/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go +++ b/go-controller/pkg/libovsdb/ops/ovs/openvswitch.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" diff --git a/go-controller/pkg/libovsdb/ops/portbinding.go b/go-controller/pkg/libovsdb/ops/portbinding.go deleted file mode 100644 index 861a63cb95..0000000000 --- a/go-controller/pkg/libovsdb/ops/portbinding.go +++ /dev/null @@ -1,53 +0,0 @@ -package ops - -import ( - "fmt" - - libovsdbclient "github.com/ovn-org/libovsdb/client" - - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" -) - -// UpdatePortBindingSetChassis sets the chassis column of the 'portBinding' row so that the OVN thinks that -// the port binding 'portBinding' is bound on the chassis. Ideally its ovn-controller which claims/binds -// a port binding. But for a remote chassis, we have to bind it as we created the remote chassis -// record for the remote zone nodes. -// TODO (numans) remove this function once OVN supports binding a port binding for a remote -// chassis. 
-func UpdatePortBindingSetChassis(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding, chassis *sbdb.Chassis) error { - ch, err := GetChassis(sbClient, chassis) - if err != nil { - return fmt.Errorf("failed to get chassis id %s(%s), error: %v", chassis.Name, chassis.Hostname, err) - } - portBinding.Chassis = &ch.UUID - - opModel := operationModel{ - Model: portBinding, - OnModelUpdates: []interface{}{&portBinding.Chassis}, - ErrNotFound: true, - BulkOp: false, - } - - m := newModelClient(sbClient) - _, err = m.CreateOrUpdate(opModel) - return err -} - -// GetPortBinding looks up a portBinding in SBDB -func GetPortBinding(sbClient libovsdbclient.Client, portBinding *sbdb.PortBinding) (*sbdb.PortBinding, error) { - found := []*sbdb.PortBinding{} - opModel := operationModel{ - Model: portBinding, - ExistingResult: &found, - ErrNotFound: true, - BulkOp: false, - } - - m := newModelClient(sbClient) - err := m.Lookup(opModel) - if err != nil { - return nil, err - } - - return found[0], nil -} diff --git a/go-controller/pkg/libovsdb/ops/portgroup.go b/go-controller/pkg/libovsdb/ops/portgroup.go index 37a6a782af..8a7cfb27f1 100644 --- a/go-controller/pkg/libovsdb/ops/portgroup.go +++ b/go-controller/pkg/libovsdb/ops/portgroup.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/qos.go b/go-controller/pkg/libovsdb/ops/qos.go index 21d6a2f7f8..cfc1d0900a 100644 --- a/go-controller/pkg/libovsdb/ops/qos.go +++ b/go-controller/pkg/libovsdb/ops/qos.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/router.go b/go-controller/pkg/libovsdb/ops/router.go index da518f7cb3..5f0ce594d4 100644 --- a/go-controller/pkg/libovsdb/ops/router.go +++ b/go-controller/pkg/libovsdb/ops/router.go @@ -8,8 +8,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -761,8 +761,8 @@ func CreateOrReplaceLogicalRouterStaticRouteWithPredicateOps( } // DeleteLogicalRouterStaticRoutesWithPredicate looks up logical router static -// routes from the cache based on a given predicate, deletes them and removes -// them from the provided logical router +// routes from the logical router of the specified name based on a given predicate, +// deletes them and removes them from the provided logical router func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client, routerName string, p logicalRouterStaticRoutePredicate) error { var ops []ovsdb.Operation var err error @@ -775,32 +775,21 @@ func DeleteLogicalRouterStaticRoutesWithPredicate(nbClient libovsdbclient.Client } // DeleteLogicalRouterStaticRoutesWithPredicateOps looks up logical router static -// routes from the cache based on a given predicate, and returns the ops to delete -// them and remove them from the provided logical router +// routes from the logical router of the specified name based on a given predicate, +// and returns the ops to delete them and remove them from the provided logical router func 
DeleteLogicalRouterStaticRoutesWithPredicateOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, routerName string, p logicalRouterStaticRoutePredicate) ([]ovsdb.Operation, error) { - router := &nbdb.LogicalRouter{ - Name: routerName, + lrsrs, err := GetRouterLogicalRouterStaticRoutesWithPredicate(nbClient, &nbdb.LogicalRouter{Name: routerName}, p) + if err != nil { + if errors.Is(err, libovsdbclient.ErrNotFound) { + return ops, nil + } + return nil, fmt.Errorf("unable to find logical router static routes with predicate on router %s: %w", routerName, err) } - deleted := []*nbdb.LogicalRouterStaticRoute{} - opModels := []operationModel{ - { - ModelPredicate: p, - ExistingResult: &deleted, - DoAfter: func() { router.StaticRoutes = extractUUIDsFromModels(deleted) }, - ErrNotFound: false, - BulkOp: true, - }, - { - Model: router, - OnModelMutations: []interface{}{&router.StaticRoutes}, - ErrNotFound: false, - BulkOp: false, - }, + if len(lrsrs) == 0 { + return ops, nil } - - m := newModelClient(nbClient) - return m.DeleteOps(ops, opModels...) + return DeleteLogicalRouterStaticRoutesOps(nbClient, ops, routerName, lrsrs...) 
} // DeleteLogicalRouterStaticRoutesOps deletes the logical router static routes and @@ -943,6 +932,11 @@ func RemoveLoadBalancersFromLogicalRouterOps(nbClient libovsdbclient.Client, ops return ops, err } +func getNATMutableFields(nat *nbdb.NAT) []interface{} { + return []interface{}{&nat.Type, &nat.ExternalIP, &nat.LogicalIP, &nat.LogicalPort, &nat.ExternalMAC, + &nat.ExternalIDs, &nat.Match, &nat.Options, &nat.ExternalPortRange, &nat.GatewayPort, &nat.Priority} +} + func buildNAT( natType nbdb.NATType, externalIP string, @@ -961,6 +955,10 @@ func buildNAT( Match: match, } + if config.Gateway.Mode != config.GatewayModeDisabled { + nat.ExternalPortRange = config.Gateway.EphemeralPortRange + } + if logicalPort != "" { nat.LogicalPort = &logicalPort } @@ -1042,7 +1040,7 @@ func BuildDNATAndSNATWithMatch( // isEquivalentNAT checks if the `searched` NAT is equivalent to `existing`. // Returns true if the UUID is set in `searched` and matches the UUID of `existing`. // Otherwise, perform the following checks: -// - Compare the Type and Match fields. +// - Compare the Type. // - Compare ExternalIP if it is set in `searched`. // - Compare LogicalIP if the Type in `searched` is SNAT. // - Compare LogicalPort if it is set in `searched`. @@ -1057,11 +1055,7 @@ func isEquivalentNAT(existing *nbdb.NAT, searched *nbdb.NAT) bool { return false } - if searched.Match != existing.Match { - return false - } - - // Compre externalIP if its not empty. + // Compare externalIP if it's not empty. 
if searched.ExternalIP != "" && searched.ExternalIP != existing.ExternalIP { return false } @@ -1163,7 +1157,7 @@ func CreateOrUpdateNATsOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation } opModel := operationModel{ Model: inputNat, - OnModelUpdates: onModelUpdatesAllNonDefault(), + OnModelUpdates: getNATMutableFields(inputNat), ErrNotFound: false, BulkOp: false, DoAfter: func() { router.Nat = append(router.Nat, inputNat.UUID) }, @@ -1291,7 +1285,7 @@ func UpdateNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, nats .. opModel := []operationModel{ { Model: nat, - OnModelUpdates: onModelUpdatesAllNonDefault(), + OnModelUpdates: getNATMutableFields(nat), ErrNotFound: true, BulkOp: false, }, diff --git a/go-controller/pkg/libovsdb/ops/router_test.go b/go-controller/pkg/libovsdb/ops/router_test.go index fd4879ebd6..579814b27e 100644 --- a/go-controller/pkg/libovsdb/ops/router_test.go +++ b/go-controller/pkg/libovsdb/ops/router_test.go @@ -306,3 +306,99 @@ func TestDeleteRoutersWithPredicateOps(t *testing.T) { }) } } + +func TestDeleteLogicalRouterStaticRoutes(t *testing.T) { + fakeRouter1LRSR1 := &nbdb.LogicalRouterStaticRoute{ + UUID: buildNamedUUID(), + IPPrefix: "192.168.1.0/24", + Nexthop: "192.168.1.0", + ExternalIDs: map[string]string{"id": "v1"}, + } + + fakeRouter1LRSR2 := &nbdb.LogicalRouterStaticRoute{ + UUID: buildNamedUUID(), + IPPrefix: "192.169.1.0/24", + Nexthop: "192.169.1.0", + ExternalIDs: map[string]string{"id": "v2"}, + } + + fakeRouter2LRSR1 := &nbdb.LogicalRouterStaticRoute{ + UUID: buildNamedUUID(), + IPPrefix: "192.170.1.0/24", + Nexthop: "192.170.1.0", + ExternalIDs: map[string]string{"id": "v1"}, + } + + tests := []struct { + desc string + expectErr bool + routerName string + lrsrs []*nbdb.LogicalRouterStaticRoute + initialNbdb libovsdbtest.TestSetup + expectedNbdb libovsdbtest.TestSetup + }{ + { + desc: "delete logical router static route with predicate will only delete static route from the specified router", + 
initialNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + fakeRouter1LRSR1, + fakeRouter1LRSR2, + fakeRouter2LRSR1, + &nbdb.LogicalRouter{ + Name: "rtr1", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter1LRSR1.UUID, fakeRouter1LRSR2.UUID}, + }, + &nbdb.LogicalRouter{ + Name: "rtr2", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter2LRSR1.UUID}, + }, + }, + }, + expectedNbdb: libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + fakeRouter1LRSR2, + fakeRouter2LRSR1, + &nbdb.LogicalRouter{ + Name: "rtr1", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter1LRSR2.UUID}, + }, + &nbdb.LogicalRouter{ + Name: "rtr2", + UUID: buildNamedUUID(), + StaticRoutes: []string{fakeRouter2LRSR1.UUID}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + nbClient, cleanup, err := libovsdbtest.NewNBTestHarness(tt.initialNbdb, nil) + if err != nil { + t.Fatalf("test: \"%s\" failed to set up test harness: %v", tt.desc, err) + } + t.Cleanup(cleanup.Cleanup) + + err = DeleteLogicalRouterStaticRoutesWithPredicate(nbClient, "rtr1", func(item *nbdb.LogicalRouterStaticRoute) bool { + return item.ExternalIDs["id"] == "v1" + }) + if err != nil && !tt.expectErr { + t.Fatal(fmt.Errorf("DeleteLogicalRouterStaticRoutesWithPredicate() error = %v", err)) + } + + matcher := libovsdbtest.HaveData(tt.expectedNbdb.NBData) + success, err := matcher.Match(nbClient) + + if !success { + t.Fatal(fmt.Errorf("test: \"%s\" didn't match expected with actual, err: %v", tt.desc, matcher.FailureMessage(nbClient))) + } + if err != nil { + t.Fatal(fmt.Errorf("test: \"%s\" encountered error: %v", tt.desc, err)) + } + }) + } +} diff --git a/go-controller/pkg/libovsdb/ops/sample.go b/go-controller/pkg/libovsdb/ops/sample.go index cb0ddbc6bf..8e7799fce3 100644 --- a/go-controller/pkg/libovsdb/ops/sample.go +++ b/go-controller/pkg/libovsdb/ops/sample.go @@ -5,9 +5,9 @@ import ( "golang.org/x/net/context" - libovsdbclient 
"github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/sb_global.go b/go-controller/pkg/libovsdb/ops/sb_global.go index 3fe14bf42d..28ee3ecad2 100644 --- a/go-controller/pkg/libovsdb/ops/sb_global.go +++ b/go-controller/pkg/libovsdb/ops/sb_global.go @@ -1,7 +1,7 @@ package ops import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" ) diff --git a/go-controller/pkg/libovsdb/ops/switch.go b/go-controller/pkg/libovsdb/ops/switch.go index 01f724b45d..4136f96bba 100644 --- a/go-controller/pkg/libovsdb/ops/switch.go +++ b/go-controller/pkg/libovsdb/ops/switch.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/template_var.go b/go-controller/pkg/libovsdb/ops/template_var.go index 9c449d5f6b..ff4edc2f92 100644 --- a/go-controller/pkg/libovsdb/ops/template_var.go +++ b/go-controller/pkg/libovsdb/ops/template_var.go @@ -3,8 +3,8 @@ package ops import ( "context" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/ops/transact.go b/go-controller/pkg/libovsdb/ops/transact.go index 312cfdaffa..37aaf6808d 100644 --- a/go-controller/pkg/libovsdb/ops/transact.go +++ b/go-controller/pkg/libovsdb/ops/transact.go @@ -9,9 +9,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ) diff --git a/go-controller/pkg/libovsdb/util/acl.go b/go-controller/pkg/libovsdb/util/acl.go index dbb6c2b3e5..24058b2a1d 100644 --- a/go-controller/pkg/libovsdb/util/acl.go +++ b/go-controller/pkg/libovsdb/util/acl.go @@ -7,7 +7,7 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -88,11 +88,18 @@ func GetACLName(dbIDs *libovsdbops.DbObjectIDs) string { return fmt.Sprintf("%.63s", aclName) } +// BuildACLWithDefaultTier is used for the most ACL-related features with the default ACL tier. +// That includes egress firewall, network policy, multicast. +func BuildACLWithDefaultTier(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, logLevels *ACLLoggingLevels, + aclT ACLPipelineType) *nbdb.ACL { + return BuildACL(dbIDs, priority, match, action, logLevels, aclT, types.DefaultACLTier) +} + // BuildACL should be used to build ACL instead of directly calling libovsdbops.BuildACL. 
// It can properly set and reset log settings for ACL based on ACLLoggingLevels, and // set acl.Name and acl.ExternalIDs based on given DbIDs func BuildACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, logLevels *ACLLoggingLevels, - aclT ACLPipelineType) *nbdb.ACL { + aclT ACLPipelineType, tier int) *nbdb.ACL { var options map[string]string var direction string switch aclT { @@ -122,14 +129,13 @@ func BuildACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string log, externalIDs, options, - types.DefaultACLTier, + tier, ) return ACL } func BuildANPACL(dbIDs *libovsdbops.DbObjectIDs, priority int, match, action string, aclT ACLPipelineType, logLevels *ACLLoggingLevels) *nbdb.ACL { - anpACL := BuildACL(dbIDs, priority, match, action, logLevels, aclT) - anpACL.Tier = GetACLTier(dbIDs) + anpACL := BuildACL(dbIDs, priority, match, action, logLevels, aclT, GetACLTier(dbIDs)) return anpACL } diff --git a/go-controller/pkg/libovsdb/util/address_set.go b/go-controller/pkg/libovsdb/util/address_set.go index e4328d43a2..085abf2da6 100644 --- a/go-controller/pkg/libovsdb/util/address_set.go +++ b/go-controller/pkg/libovsdb/util/address_set.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/mac_binding.go b/go-controller/pkg/libovsdb/util/mac_binding.go index e6f11d8347..d6410a9b65 100644 --- a/go-controller/pkg/libovsdb/util/mac_binding.go +++ b/go-controller/pkg/libovsdb/util/mac_binding.go @@ -3,9 +3,9 @@ package util import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + 
libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/libovsdb/util/metric.go b/go-controller/pkg/libovsdb/util/metric.go index 06c787f5fe..89015f343e 100644 --- a/go-controller/pkg/libovsdb/util/metric.go +++ b/go-controller/pkg/libovsdb/util/metric.go @@ -3,7 +3,7 @@ package util import ( "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/nb_global.go b/go-controller/pkg/libovsdb/util/nb_global.go index 57d9c69b97..d1bc11f0a2 100644 --- a/go-controller/pkg/libovsdb/util/nb_global.go +++ b/go-controller/pkg/libovsdb/util/nb_global.go @@ -3,7 +3,7 @@ package util import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/port.go b/go-controller/pkg/libovsdb/util/port.go index 8e5cbe616f..bc82042419 100644 --- a/go-controller/pkg/libovsdb/util/port.go +++ b/go-controller/pkg/libovsdb/util/port.go @@ -5,7 +5,7 @@ import ( "net" "strings" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/libovsdb/util/router.go b/go-controller/pkg/libovsdb/util/router.go index 12d7db3b27..b316fea0e3 100644 --- 
a/go-controller/pkg/libovsdb/util/router.go +++ b/go-controller/pkg/libovsdb/util/router.go @@ -8,12 +8,11 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -34,11 +33,7 @@ import ( // (TODO: FIXME): With this route, we are officially breaking support for IC with zones that have multiple-nodes // NOTE: This route is exactly the same as what is added by pod-live-migration feature and we keep the route exactly // same across the 3 features so that if the route already exists on the node, this is just a no-op -func CreateDefaultRouteToExternal(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error { - gatewayIPs, err := GetLRPAddrs(nbClient, types.GWRouterToJoinSwitchPrefix+gwRouterName) - if err != nil { - return fmt.Errorf("attempt at finding node gateway router %s network information failed, err: %w", gwRouterName, err) - } +func CreateDefaultRouteToExternal(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry, gatewayIPs []*net.IPNet) error { for _, clusterSubnet := range clusterSubnets { isClusterSubnetIPV6 := utilnet.IsIPv6String(clusterSubnet.CIDR.IP.String()) gatewayIP, err := util.MatchFirstIPNetFamily(isClusterSubnetIPV6, gatewayIPs) diff --git a/go-controller/pkg/libovsdb/util/router_test.go b/go-controller/pkg/libovsdb/util/router_test.go index e2047f57a8..6b0b325189 100644 --- a/go-controller/pkg/libovsdb/util/router_test.go +++ b/go-controller/pkg/libovsdb/util/router_test.go @@ -31,6 +31,9 @@ func 
TestCreateDefaultRouteToExternal(t *testing.T) { gwRouterPortName := types.GWRouterToJoinSwitchPrefix + gwRouterName gwRouterIPAddressV4 := "100.64.0.3" gwRouterIPAddressV6 := "fd98::3" + gwRouterIPAddressV4CIDR := fmt.Sprintf("%s/32", gwRouterIPAddressV4) + gwRouterIPAddressV6CIDR := fmt.Sprintf("%s/128", gwRouterIPAddressV6) + gatewayIPs := []*net.IPNet{ovntest.MustParseIPNet(gwRouterIPAddressV4CIDR), ovntest.MustParseIPNet(gwRouterIPAddressV6CIDR)} gwRouterPort := &nbdb.LogicalRouterPort{ UUID: gwRouterPortName + "-uuid", Name: gwRouterPortName, @@ -228,7 +231,7 @@ func TestCreateDefaultRouteToExternal(t *testing.T) { tc.preTestAction() } - if err = CreateDefaultRouteToExternal(nbClient, ovnClusterRouterName, gwRouterName, config.Default.ClusterSubnets); err != nil { + if err = CreateDefaultRouteToExternal(nbClient, ovnClusterRouterName, gwRouterName, config.Default.ClusterSubnets, gatewayIPs); err != nil { t.Fatal(fmt.Errorf("failed to run CreateDefaultRouteToExternal: %v", err)) } diff --git a/go-controller/pkg/libovsdb/util/switch.go b/go-controller/pkg/libovsdb/util/switch.go index c0a3b0ca52..c3b1eb9e02 100644 --- a/go-controller/pkg/libovsdb/util/switch.go +++ b/go-controller/pkg/libovsdb/util/switch.go @@ -9,7 +9,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/metrics/cluster_manager.go b/go-controller/pkg/metrics/cluster_manager.go index 3acba72759..711d4dc026 100644 --- a/go-controller/pkg/metrics/cluster_manager.go +++ b/go-controller/pkg/metrics/cluster_manager.go @@ -7,28 +7,29 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) 
var registerClusterManagerBaseMetrics sync.Once // MetricClusterManagerLeader identifies whether this instance of ovnkube-cluster-manager is a leader or not var MetricClusterManagerLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "leader", Help: "Identifies whether the instance of ovnkube-cluster-manager is a leader(1) or not(0).", }) var MetricClusterManagerReadyDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "ready_duration_seconds", Help: "The duration for the cluster manager to get to ready state", }) var metricV4HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "num_v4_host_subnets", Help: "The total number of v4 host subnets possible per network"}, []string{ @@ -37,8 +38,8 @@ var metricV4HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricV6HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "num_v6_host_subnets", Help: "The total number of v6 host subnets possible per network"}, []string{ @@ -47,8 +48,8 @@ var metricV6HostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricV4AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: 
MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "allocated_v4_host_subnets", Help: "The total number of v4 host subnets currently allocated per network"}, []string{ @@ -57,8 +58,8 @@ var metricV4AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOp ) var metricV6AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "allocated_v6_host_subnets", Help: "The total number of v6 host subnets currently allocated per network"}, []string{ @@ -68,28 +69,50 @@ var metricV6AllocatedHostSubnetCount = prometheus.NewGaugeVec(prometheus.GaugeOp /** EgressIP metrics recorded from cluster-manager begins**/ var metricEgressIPCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "num_egress_ips", Help: "The number of defined egress IP addresses", }) var metricEgressIPNodeUnreacheableCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "egress_ips_node_unreachable_total", Help: "The total number of times assigned egress IP(s) were unreachable"}, ) var metricEgressIPRebalanceCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "egress_ips_rebalance_total", Help: "The total number of times assigned 
egress IP(s) needed to be moved to a different node"}, ) /** EgressIP metrics recorded from cluster-manager ends**/ +var metricUDNCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, + Name: "user_defined_networks", + Help: "The total number of UserDefinedNetworks in the cluster"}, + []string{ + "role", + "topology", + }, +) + +var metricCUDNCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, + Name: "cluster_user_defined_networks", + Help: "The total number of ClusterUserDefinedNetworks in the cluster"}, + []string{ + "role", + "topology", + }, +) + // RegisterClusterManagerBase registers ovnkube cluster manager base metrics with the Prometheus registry. // This function should only be called once. func RegisterClusterManagerBase() { @@ -98,8 +121,8 @@ func RegisterClusterManagerBase() { prometheus.MustRegister(MetricClusterManagerReadyDuration) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemClusterManager, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemClusterManager, Name: "build_info", Help: "A metric with a constant '1' value labeled by version, revision, branch, " + "and go version from which ovnkube was built and when and who built it", @@ -129,6 +152,8 @@ func RegisterClusterManagerFunctional() { prometheus.MustRegister(metricEgressIPRebalanceCount) prometheus.MustRegister(metricEgressIPCount) } + prometheus.MustRegister(metricUDNCount) + prometheus.MustRegister(metricCUDNCount) if err := prometheus.Register(MetricResourceRetryFailuresCount); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { panic(err) @@ -164,3 +189,23 @@ func RecordEgressIPRebalance(count int) { func RecordEgressIPCount(count float64) { 
metricEgressIPCount.Set(count) } + +// IncrementUDNCount increments the number of UserDefinedNetworks of the given type +func IncrementUDNCount(role, topology string) { + metricUDNCount.WithLabelValues(role, topology).Inc() +} + +// DecrementUDNCount decrements the number of UserDefinedNetworks of the given type +func DecrementUDNCount(role, topology string) { + metricUDNCount.WithLabelValues(role, topology).Dec() +} + +// IncrementCUDNCount increments the number of ClusterUserDefinedNetworks of the given type +func IncrementCUDNCount(role, topology string) { + metricCUDNCount.WithLabelValues(role, topology).Inc() +} + +// DecrementCUDNCount decrements the number of ClusterUserDefinedNetworks of the given type +func DecrementCUDNCount(role, topology string) { + metricCUDNCount.WithLabelValues(role, topology).Dec() +} diff --git a/go-controller/pkg/metrics/metrics.go b/go-controller/pkg/metrics/metrics.go index 847619b8dd..b8e9d736c7 100644 --- a/go-controller/pkg/metrics/metrics.go +++ b/go-controller/pkg/metrics/metrics.go @@ -24,25 +24,14 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) const ( - MetricOvnkubeNamespace = "ovnkube" - MetricOvnkubeSubsystemController = "controller" - MetricOvnkubeSubsystemClusterManager = "clustermanager" - MetricOvnkubeSubsystemNode = "node" - MetricOvnNamespace = "ovn" - MetricOvnSubsystemDB = "db" - MetricOvnSubsystemNorthd = "northd" - MetricOvnSubsystemController = "controller" - MetricOvsNamespace = "ovs" - MetricOvsSubsystemVswitchd = "vswitchd" - MetricOvsSubsystemDB = "db" - ovnNorthd = "ovn-northd" ovnController = "ovn-controller" ovsVswitchd = "ovs-vswitchd" @@ -82,7 +71,7 @@ type stopwatchStatistics struct { // 
resource reached the maximum retry limit and will not be retried. This metric doesn't // need Subsystem string since it is applicable for both master and node. var MetricResourceRetryFailuresCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, + Namespace: types.MetricOvnkubeNamespace, Name: "resource_retry_failures_total", Help: "The total number of times processing a Kubernetes resource reached the maximum retry limit and was no longer processed", }) diff --git a/go-controller/pkg/metrics/node.go b/go-controller/pkg/metrics/node.go index 3b19c334d7..07e621fc97 100644 --- a/go-controller/pkg/metrics/node.go +++ b/go-controller/pkg/metrics/node.go @@ -7,13 +7,14 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // MetricCNIRequestDuration is a prometheus metric that tracks the duration // of CNI requests var MetricCNIRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "cni_request_duration_seconds", Help: "The duration of CNI server requests.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -22,23 +23,23 @@ var MetricCNIRequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOp ) var MetricNodeReadyDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "ready_duration_seconds", Help: "The duration for the node to get to ready state.", }) var metricOvnNodePortEnabled = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: 
types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "nodeport_enabled", Help: "Specifies if the node port is enabled on this node(1) or not(0).", }) // metric to get the size of ovnkube.log file var metricOvnKubeNodeLogFileSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "logfile_size_bytes", Help: "The size of ovnkube logfile on the node."}, []string{ @@ -56,8 +57,8 @@ func RegisterNodeMetrics(stopChan <-chan struct{}) { prometheus.MustRegister(metricOvnNodePortEnabled) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemNode, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemNode, Name: "build_info", Help: "A metric with a constant '1' value labeled by version, revision, branch, " + "and go version from which ovnkube was built and when and who built it.", @@ -72,7 +73,7 @@ func RegisterNodeMetrics(stopChan <-chan struct{}) { }, func() float64 { return 1 }, )) - registerWorkqueueMetrics(MetricOvnkubeNamespace, MetricOvnkubeSubsystemNode) + registerWorkqueueMetrics(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemNode) if err := prometheus.Register(MetricResourceRetryFailuresCount); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { panic(err) diff --git a/go-controller/pkg/metrics/ovn.go b/go-controller/pkg/metrics/ovn.go index 4cce457ea5..51510fd7f9 100644 --- a/go-controller/pkg/metrics/ovn.go +++ b/go-controller/pkg/metrics/ovn.go @@ -9,32 +9,33 @@ import ( "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" + 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/vswitchd" ) // ovnController Configuration metrics var metricRemoteProbeInterval = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "remote_probe_interval_seconds", Help: "The inactivity probe interval of the connection to the OVN SB DB.", }) var metricOpenFlowProbeInterval = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "openflow_probe_interval_seconds", Help: "The inactivity probe interval of the OpenFlow connection to the " + "OpenvSwitch integration bridge.", }) var metricMonitorAll = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "monitor_all", Help: "Specifies if ovn-controller should monitor all records of tables in OVN SB DB. 
" + "If set to false, it will conditionally monitor the records that " + @@ -42,8 +43,8 @@ var metricMonitorAll = prometheus.NewGauge(prometheus.GaugeOpts{ }) var metricEncapIP = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "encap_ip", Help: "A metric with a constant '1' value labeled by ipadress that " + "specifies the encapsulation ip address configured on that node.", @@ -54,8 +55,8 @@ var metricEncapIP = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricSbConnectionMethod = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "sb_connection_method", Help: "A metric with a constant '1' value labeled by connection_method that " + "specifies the ovn-remote value configured on that node.", @@ -66,8 +67,8 @@ var metricSbConnectionMethod = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricEncapType = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "encap_type", Help: "A metric with a constant '1' value labeled by type that " + "specifies the encapsulation type a chassis should use to " + @@ -79,8 +80,8 @@ var metricEncapType = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricBridgeMappings = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "bridge_mappings", Help: "A metric with a constant '1' value labeled by mapping that " + "specifies list of key-value pairs that map a physical network name 
" + @@ -92,8 +93,8 @@ var metricBridgeMappings = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOVNControllerSBDBConnection = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "southbound_database_connected", Help: "Specifies if OVN controller is connected to OVN southbound database (1) or not (0)", }) @@ -261,11 +262,11 @@ func setOvnControllerConfigurationMetrics(ovsDBClient libovsdbclient.Client) (er } openflowProbeField := openvSwitch.ExternalIDs["ovn-bridge-remote-probe-interval"] - openflowProbeVal := parseMetricToFloat(MetricOvnSubsystemController, "ovn-bridge-remote-probe-interval", openflowProbeField) + openflowProbeVal := parseMetricToFloat(types.MetricOvnSubsystemController, "ovn-bridge-remote-probe-interval", openflowProbeField) metricOpenFlowProbeInterval.Set(openflowProbeVal) remoteProbeField := openvSwitch.ExternalIDs["ovn-remote-probe-interval"] - remoteProbeValue := parseMetricToFloat(MetricOvnSubsystemController, "ovn-remote-probe-interval", remoteProbeField) + remoteProbeValue := parseMetricToFloat(types.MetricOvnSubsystemController, "ovn-remote-probe-interval", remoteProbeField) metricRemoteProbeInterval.Set(remoteProbeValue / 1000) var ovnMonitorValue float64 @@ -406,8 +407,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, getOvnControllerVersionInfo() ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "build_info", Help: "A metric with a constant '1' value labeled by version and library " + "from which ovn binaries were built", @@ -423,8 +424,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, 
ovnRegistry.MustRegister(metricOVNControllerSBDBConnection) ovnRegistry.MustRegister(prometheus.NewCounterFunc( prometheus.CounterOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "integration_bridge_openflow_total", Help: "The total number of OpenFlow flows in the integration bridge.", }, func() float64 { @@ -437,7 +438,7 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, for _, kvPair := range strings.Fields(stdout) { if strings.HasPrefix(kvPair, "flow_count=") { value := strings.Split(kvPair, "=")[1] - return parseMetricToFloat(MetricOvnSubsystemController, "integration_bridge_openflow_total", + return parseMetricToFloat(types.MetricOvnSubsystemController, "integration_bridge_openflow_total", value) } } @@ -445,8 +446,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, })) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "integration_bridge_patch_ports", Help: "Captures the number of patch ports that connect br-int OVS " + "bridge to physical OVS bridge and br-local OVS bridge.", @@ -456,8 +457,8 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, })) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemController, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemController, Name: "integration_bridge_geneve_ports", Help: "Captures the number of geneve ports that are on br-int OVS bridge.", }, @@ -475,11 +476,11 @@ func RegisterOvnControllerMetrics(ovsDBClient libovsdbclient.Client, ovnRegistry.MustRegister(metricBridgeMappings) // Register the ovn-controller coverage/show metrics 
componentCoverageShowMetricsMap[ovnController] = ovnControllerCoverageShowMetricsMap - registerCoverageShowMetrics(ovnController, MetricOvnNamespace, MetricOvnSubsystemController) + registerCoverageShowMetrics(ovnController, types.MetricOvnNamespace, types.MetricOvnSubsystemController) // Register the ovn-controller coverage/show metrics componentStopwatchShowMetricsMap[ovnController] = ovnControllerStopwatchShowMetricsMap - registerStopwatchShowMetrics(ovnController, MetricOvnNamespace, MetricOvnSubsystemController) + registerStopwatchShowMetrics(ovnController, types.MetricOvnNamespace, types.MetricOvnSubsystemController) // ovn-controller configuration metrics updater go ovnControllerConfigurationMetricsUpdater(ovsDBClient, diff --git a/go-controller/pkg/metrics/ovn_db.go b/go-controller/pkg/metrics/ovn_db.go index 8116a68732..ea206adc3c 100644 --- a/go-controller/pkg/metrics/ovn_db.go +++ b/go-controller/pkg/metrics/ovn_db.go @@ -14,12 +14,13 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) var metricOVNDBSessions = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "jsonrpc_server_sessions", Help: "Active number of JSON RPC Server sessions to the DB"}, []string{ @@ -28,8 +29,8 @@ var metricOVNDBSessions = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOVNDBMonitor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "ovsdb_monitors", Help: "Number of OVSDB Monitors on the server"}, []string{ @@ -38,8 +39,8 @@ var metricOVNDBMonitor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBSize = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "db_size_bytes", Help: "The size of the database file associated with the OVN DB component."}, []string{ @@ -49,8 +50,8 @@ var metricDBSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // ClusterStatus metrics var metricDBClusterCID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_id", Help: "A metric with a constant '1' value labeled by database name and cluster uuid"}, []string{ @@ -60,8 +61,8 @@ var metricDBClusterCID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterSID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_server_id", Help: "A metric with a constant '1' value labeled by database name, cluster uuid " + "and server uuid"}, @@ -73,8 +74,8 @@ var metricDBClusterSID = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterServerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_server_status", Help: "A metric with a constant '1' value labeled by database name, cluster uuid, server uuid " + "server status"}, @@ -87,8 +88,8 @@ var metricDBClusterServerStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterServerRole = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: 
"cluster_server_role", Help: "A metric with a constant '1' value labeled by database name, cluster uuid, server uuid " + "and server role"}, @@ -101,8 +102,8 @@ var metricDBClusterServerRole = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterTerm = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_term", Help: "A metric that returns the current election term value labeled by database name, cluster uuid, and " + "server uuid"}, @@ -114,8 +115,8 @@ var metricDBClusterTerm = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterServerVote = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_server_vote", Help: "A metric with a constant '1' value labeled by database name, cluster uuid, server uuid " + "and server vote"}, @@ -128,8 +129,8 @@ var metricDBClusterServerVote = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterElectionTimer = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_election_timer", Help: "A metric that returns the current election timer value labeled by database name, cluster uuid, " + "and server uuid"}, @@ -141,8 +142,8 @@ var metricDBClusterElectionTimer = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterLogIndexStart = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_index_start", Help: "A metric that returns the log entry index start value labeled by database 
name, cluster uuid, " + "and server uuid"}, @@ -154,8 +155,8 @@ var metricDBClusterLogIndexStart = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterLogIndexNext = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_index_next", Help: "A metric that returns the log entry index next value labeled by database name, cluster uuid, " + "and server uuid"}, @@ -167,8 +168,8 @@ var metricDBClusterLogIndexNext = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterLogNotCommitted = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_not_committed", Help: "A metric that returns the number of log entries not committed labeled by database name, cluster uuid, " + "and server uuid"}, @@ -180,8 +181,8 @@ var metricDBClusterLogNotCommitted = prometheus.NewGaugeVec(prometheus.GaugeOpts ) var metricDBClusterLogNotApplied = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_log_not_applied", Help: "A metric that returns the number of log entries not applied labeled by database name, cluster uuid, " + "and server uuid"}, @@ -193,8 +194,8 @@ var metricDBClusterLogNotApplied = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_inbound_connections_total", Help: "A metric that returns the total number of inbound connections to the server labeled by " + "database 
name, cluster uuid, and server uuid"}, @@ -206,8 +207,8 @@ var metricDBClusterConnIn = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_outbound_connections_total", Help: "A metric that returns the total number of outbound connections from the server labeled by " + "database name, cluster uuid, and server uuid"}, @@ -219,8 +220,8 @@ var metricDBClusterConnOut = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnInErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_inbound_connections_error_total", Help: "A metric that returns the total number of failed inbound connections to the server labeled by " + " database name, cluster uuid, and server uuid"}, @@ -232,8 +233,8 @@ var metricDBClusterConnInErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricDBClusterConnOutErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "cluster_outbound_connections_error_total", Help: "A metric that returns the total number of failed outbound connections from the server labeled by " + "database name, cluster uuid, and server uuid"}, @@ -382,8 +383,8 @@ func RegisterOvnDBMetrics(clientset kubernetes.Interface, k8sNodeName string, st ovnRegistry.MustRegister(metricOVNDBSessions) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "build_info", Help: "A 
metric with a constant '1' value labeled by ovsdb-server version and " + "NB and SB schema version", diff --git a/go-controller/pkg/metrics/ovn_northd.go b/go-controller/pkg/metrics/ovn_northd.go index ae2afe45c8..e72c89fdd3 100644 --- a/go-controller/pkg/metrics/ovn_northd.go +++ b/go-controller/pkg/metrics/ovn_northd.go @@ -11,6 +11,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -128,8 +129,8 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string getOvnNorthdVersionInfo() ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "build_info", Help: "A metric with a constant '1' value labeled by version and library " + "from which ovn binaries were built", @@ -142,8 +143,8 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string )) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "status", Help: "Specifies whether this instance of ovn-northd is standby(0) or active(1) or paused(2).", }, func() float64 { @@ -169,8 +170,8 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string )) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "nb_connection_status", Help: "Specifies nb-connection-status of ovn-northd, not connected(0) or connected(1).", }, func() float64 { @@ -179,8 +180,8 @@ func 
RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string )) ovnRegistry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, Name: "sb_connection_status", Help: "Specifies sb-connection-status of ovn-northd, not connected(0) or connected(1).", }, func() float64 { @@ -190,11 +191,11 @@ func RegisterOvnNorthdMetrics(clientset kubernetes.Interface, k8sNodeName string // Register the ovn-northd coverage/show metrics with prometheus componentCoverageShowMetricsMap[ovnNorthd] = ovnNorthdCoverageShowMetricsMap - registerCoverageShowMetrics(ovnNorthd, MetricOvnNamespace, MetricOvnSubsystemNorthd) + registerCoverageShowMetrics(ovnNorthd, types.MetricOvnNamespace, types.MetricOvnSubsystemNorthd) go coverageShowMetricsUpdater(ovnNorthd, stopChan) // Register the ovn-northd stopwatch/show metrics with prometheus componentStopwatchShowMetricsMap[ovnNorthd] = ovnNorthdStopwatchShowMetricsMap - registerStopwatchShowMetrics(ovnNorthd, MetricOvnNamespace, MetricOvnSubsystemNorthd) + registerStopwatchShowMetrics(ovnNorthd, types.MetricOvnNamespace, types.MetricOvnSubsystemNorthd) go stopwatchShowMetricsUpdater(ovnNorthd, stopChan) } diff --git a/go-controller/pkg/metrics/ovnkube_controller.go b/go-controller/pkg/metrics/ovnkube_controller.go index b73a0ad8de..30c846d07c 100644 --- a/go-controller/pkg/metrics/ovnkube_controller.go +++ b/go-controller/pkg/metrics/ovnkube_controller.go @@ -3,8 +3,6 @@ package metrics import ( "errors" "fmt" - "hash/fnv" - "math" "runtime" "strconv" "sync" @@ -18,17 +16,16 @@ import ( "k8s.io/client-go/util/workqueue" klog "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/cache" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -37,16 +34,16 @@ import ( // read from SB DB. This is registered within func RunTimestamp in order to allow gathering this // metric on the fly when metrics are scraped. var metricNbE2eTimestamp = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nb_e2e_timestamp", Help: "The current e2e-timestamp value as written to the northbound database"}, ) // metricDbTimestamp is the UNIX timestamp seen in NB and SB DBs. var metricDbTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemDB, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemDB, Name: "e2e_timestamp", Help: "The current e2e-timestamp value as observed in this instance of the database"}, []string{ @@ -57,8 +54,8 @@ var metricDbTimestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // metricPodCreationLatency is the time between a pod being scheduled and // completing its logical switch port configuration. 
var metricPodCreationLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_creation_latency_seconds", Help: "The duration between a pod being scheduled and completing its logical switch port configuration", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -66,8 +63,8 @@ var metricPodCreationLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ // MetricResourceUpdateCount is the number of times a particular resource's UpdateFunc has been called. var MetricResourceUpdateCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_update_total", Help: "The number of times a given resource event (add, update, or delete) has been handled"}, []string{ @@ -79,8 +76,8 @@ var MetricResourceUpdateCount = prometheus.NewCounterVec(prometheus.CounterOpts{ // MetricResourceAddLatency is the time taken to complete resource update by an handler. // This measures the latency for all of the handlers for a given resource. var MetricResourceAddLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_add_latency_seconds", Help: "The duration to process all handlers for a given resource event - add.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -89,8 +86,8 @@ var MetricResourceAddLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ // MetricResourceUpdateLatency is the time taken to complete resource update by an handler. 
// This measures the latency for all of the handlers for a given resource. var MetricResourceUpdateLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_update_latency_seconds", Help: "The duration to process all handlers for a given resource event - update.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -99,8 +96,8 @@ var MetricResourceUpdateLatency = prometheus.NewHistogram(prometheus.HistogramOp // MetricResourceDeleteLatency is the time taken to complete resource update by an handler. // This measures the latency for all of the handlers for a given resource. var MetricResourceDeleteLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "resource_delete_latency_seconds", Help: "The duration to process all handlers for a given resource event - delete.", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, @@ -108,32 +105,32 @@ var MetricResourceDeleteLatency = prometheus.NewHistogram(prometheus.HistogramOp // MetricRequeueServiceCount is the number of times a particular service has been requeued. var MetricRequeueServiceCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "requeue_service_total", Help: "A metric that captures the number of times a service is requeued after failing to sync with OVN"}, ) // MetricSyncServiceCount is the number of times a particular service has been synced. 
var MetricSyncServiceCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sync_service_total", Help: "A metric that captures the number of times a service is synced with OVN load balancers"}, ) // MetricSyncServiceLatency is the time taken to sync a service with the OVN load balancers. var MetricSyncServiceLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sync_service_latency_seconds", Help: "The latency of syncing a service with the OVN load balancers", Buckets: prometheus.ExponentialBuckets(.1, 2, 15)}, ) var MetricOVNKubeControllerReadyDuration = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "ready_duration_seconds", Help: "The duration for the ovnkube-controller to get to ready state", }) @@ -141,8 +138,8 @@ var MetricOVNKubeControllerReadyDuration = prometheus.NewGauge(prometheus.GaugeO // MetricOVNKubeControllerSyncDuration is the time taken to complete initial Watch for different resource. // Resource name is in the label. 
var MetricOVNKubeControllerSyncDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sync_duration_seconds", Help: "The duration to sync and setup all handlers for a given resource"}, []string{ @@ -151,15 +148,15 @@ var MetricOVNKubeControllerSyncDuration = prometheus.NewGaugeVec(prometheus.Gaug // MetricOVNKubeControllerLeader identifies whether this instance of ovnkube-controller is a leader or not var MetricOVNKubeControllerLeader = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "leader", Help: "Identifies whether the instance of ovnkube-controller is a leader(1) or not(0).", }) var metricOvnKubeControllerLogFileSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "logfile_size_bytes", Help: "The size of ovnkube-controller log file."}, []string{ @@ -168,24 +165,24 @@ var metricOvnKubeControllerLogFileSize = prometheus.NewGaugeVec(prometheus.Gauge ) var metricEgressIPAssignLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "egress_ips_assign_latency_seconds", Help: "The latency of egress IP assignment to ovn nb database", Buckets: prometheus.ExponentialBuckets(.001, 2, 15), }) var metricEgressIPUnassignLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: 
MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "egress_ips_unassign_latency_seconds", Help: "The latency of egress IP unassignment from ovn nb database", Buckets: prometheus.ExponentialBuckets(.001, 2, 15), }) var metricNetpolEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "network_policy_event_latency_seconds", Help: "The latency of full network policy event handling (create, delete)", Buckets: prometheus.ExponentialBuckets(.004, 2, 15)}, @@ -194,8 +191,8 @@ var metricNetpolEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOp }) var metricNetpolLocalPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "network_policy_local_pod_event_latency_seconds", Help: "The latency of local pod events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -204,8 +201,8 @@ var metricNetpolLocalPodEventLatency = prometheus.NewHistogramVec(prometheus.His }) var metricNetpolPeerNamespaceEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "network_policy_peer_namespace_event_latency_seconds", Help: "The latency of peer namespace events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -214,8 +211,8 @@ var metricNetpolPeerNamespaceEventLatency = prometheus.NewHistogramVec(prometheu }) var 
metricPodSelectorAddrSetPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_selector_address_set_pod_event_latency_seconds", Help: "The latency of peer pod events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -224,8 +221,8 @@ var metricPodSelectorAddrSetPodEventLatency = prometheus.NewHistogramVec(prometh }) var metricPodSelectorAddrSetNamespaceEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_selector_address_set_namespace_event_latency_seconds", Help: "The latency of peer namespace events handling (add, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -234,8 +231,8 @@ var metricPodSelectorAddrSetNamespaceEventLatency = prometheus.NewHistogramVec(p }) var metricPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_event_latency_seconds", Help: "The latency of pod events handling (add, update, delete)", Buckets: prometheus.ExponentialBuckets(.002, 2, 15)}, @@ -244,51 +241,51 @@ var metricPodEventLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ }) var metricEgressFirewallRuleCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "num_egress_firewall_rules", Help: "The number of egress firewall rules 
defined"}, ) var metricIPsecEnabled = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "ipsec_enabled", Help: "Specifies whether IPSec is enabled for this cluster(1) or not enabled for this cluster(0)", }) var metricEgressRoutingViaHost = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "egress_routing_via_host", Help: "Specifies whether egress gateway mode is via host networking stack(1) or not(0)", }) var metricEgressFirewallCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "num_egress_firewalls", Help: "The number of egress firewall policies", }) /** AdminNetworkPolicyMetrics Begin**/ var metricANPCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "admin_network_policies", Help: "The total number of admin network policies in the cluster", }) var metricBANPCount = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "baseline_admin_network_policies", Help: "The total number of baseline admin network policies in the cluster", }) var metricANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: 
MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "admin_network_policies_db_objects", Help: "The total number of OVN NBDB objects (table_name) owned by AdminNetworkPolicy controller in the cluster"}, []string{ @@ -297,8 +294,8 @@ var metricANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricBANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "baseline_admin_network_policies_db_objects", Help: "The total number of OVN NBDB objects (table_name) owned by BaselineAdminNetworkPolicy controller in the cluster"}, []string{ @@ -310,64 +307,37 @@ var metricBANPDBObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // metricFirstSeenLSPLatency is the time between a pod first seen in OVN-Kubernetes and its Logical Switch Port is created var metricFirstSeenLSPLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_first_seen_lsp_created_duration_seconds", Help: "The duration between a pod first observed in OVN-Kubernetes and Logical Switch Port created", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) var metricLSPPortBindingLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_lsp_created_port_binding_duration_seconds", Help: "The duration between a pods Logical Switch Port created and port binding observed in cache", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) var 
metricPortBindingChassisLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_port_binding_port_binding_chassis_duration_seconds", Help: "The duration between a pods port binding observed and port binding chassis update observed in cache", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) var metricPortBindingUpLatency = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "pod_port_binding_chassis_port_binding_up_duration_seconds", Help: "The duration between a pods port binding chassis update and port binding up observed in cache", Buckets: prometheus.ExponentialBuckets(.01, 2, 15), }) -var metricNetworkProgramming prometheus.ObserverVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, - Name: "network_programming_duration_seconds", - Help: "The duration to apply network configuration for a kind (e.g. pod, service, networkpolicy). " + - "Configuration includes add, update and delete events for each kind.", - Buckets: merge( - prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s - prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s - prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 
115s - prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min - []string{ - "kind", - }) - -var metricNetworkProgrammingOVN = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, - Name: "network_programming_ovn_duration_seconds", - Help: "The duration for OVN to apply network configuration", - Buckets: merge( - prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s - prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s - prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 115s - prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min -) - const ( globalOptionsTimestampField = "e2e_timestamp" globalOptionsProbeIntervalField = "northd_probe_interval" @@ -381,8 +351,8 @@ func RegisterOVNKubeControllerBase() { prometheus.MustRegister(MetricOVNKubeControllerSyncDuration) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "build_info", Help: "A metric with a constant '1' value labeled by version, revision, branch, " + "and go version from which ovnkube was built and when and who built it", @@ -410,11 +380,11 @@ func RegisterOVNKubeControllerPerformance(nbClient libovsdbclient.Client) { prometheus.MustRegister(MetricRequeueServiceCount) prometheus.MustRegister(MetricSyncServiceCount) prometheus.MustRegister(MetricSyncServiceLatency) - registerWorkqueueMetrics(MetricOvnkubeNamespace, MetricOvnkubeSubsystemController) + registerWorkqueueMetrics(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemController) prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnNamespace, - Subsystem: MetricOvnSubsystemNorthd, + Namespace: types.MetricOvnNamespace, + Subsystem: types.MetricOvnSubsystemNorthd, 
Name: "northd_probe_interval", Help: "The maximum number of milliseconds of idle time on connection to the OVN SB " + "and NB DB before sending an inactivity probe message", @@ -505,8 +475,8 @@ func RunTimestamp(stopChan <-chan struct{}, sbClient, nbClient libovsdbclient.Cl // cache when metrics HTTP endpoint is scraped. prometheus.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvnkubeNamespace, - Subsystem: MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "sb_e2e_timestamp", Help: "The current e2e-timestamp value as observed in the southbound database", }, func() float64 { @@ -916,521 +886,6 @@ func getPodUIDFromPortBinding(row *sbdb.PortBinding) kapimtypes.UID { return kapimtypes.UID(podUID) } -const ( - updateOVNMeasurementChSize = 500 - deleteOVNMeasurementChSize = 50 - processChSize = 1000 - nbGlobalTable = "NB_Global" - //fixme: remove when bug is fixed in OVN (Red Hat bugzilla bug number 2074019). Also, handle overflow event. - maxNbCfg = math.MaxUint32 - 1000 - maxMeasurementLifetime = 20 * time.Minute -) - -type ovnMeasurement struct { - // time just before ovsdb tx is called - startTimestamp time.Time - // time when the nbCfg value and its associated configuration is applied to all nodes - endTimestamp time.Time - // OVN measurement complete - start and end timestamps are valid - complete bool - // nb_cfg value that started the measurement - nbCfg int -} - -// measurement stores a measurement attempt through OVN-Kubernetes controller and optionally OVN -type measurement struct { - // kubernetes kind e.g. pod or service - kind string - // time when Add is executed - startTimestamp time.Time - // time when End is executed - endTimestamp time.Time - // if true endTimestamp is valid - end bool - // time when this measurement expires. 
Set during Add - expiresAt time.Time - // OVN measurement(s) via AddOVN - ovnMeasurements []ovnMeasurement -} - -// hvCfgUpdate holds the information received from OVN Northbound event handler -type hvCfgUpdate struct { - // timestamp is in milliseconds - timestamp int - hvCfg int -} - -type ConfigDurationRecorder struct { - // rate at which measurements are allowed. Probabilistically, 1 in every measurementRate - measurementRate uint64 - measurements map[string]measurement - // controls RW access to measurements map - measurementsMu sync.RWMutex - // channel to trigger processing a measurement following call to End func. Channel string is kind/namespace/name - triggerProcessCh chan string - enabled bool -} - -// global variable is needed because this functionality is accessed in many functions -var cdr *ConfigDurationRecorder - -// lock for accessing the cdr global variable -var cdrMutex sync.Mutex - -func GetConfigDurationRecorder() *ConfigDurationRecorder { - cdrMutex.Lock() - defer cdrMutex.Unlock() - if cdr == nil { - cdr = &ConfigDurationRecorder{} - } - return cdr -} - -var configDurationRegOnce sync.Once - -// Run monitors the config duration for OVN-Kube master to configure k8 kinds. A measurement maybe allowed and this is -// related to the number of k8 nodes, N [1] and by argument k [2] where there is a probability that 1 out of N*k -// measurement attempts are allowed. If k=0, all measurements are allowed. mUpdatePeriod determines the period to -// process and publish metrics -// [1] 1 0. The measurement rate is proportional to - // the number of nodes, N and argument k. 1 out of every N*k attempted measurements will succeed. - - // For the optional OVN measurement by calling AddOVN, when the CMS is about to make a transaction to configure - // whatever kind, a call to AddOVN function allows the caller to measure OVN duration. 
- // An ovsdb operation is returned to the caller of AddOVN, which they can bundle with their existing transactions - // sent to OVN which will tell OVN to measure how long it takes to configure all nodes with the config in the transaction. - // Config duration then waits for OVN to configure all nodes and calculates the time delta. - - // ** configuration duration recorder - caveats ** - // For the optional OVN recording, it does not give you an exact time duration for how long it takes to configure your - // k8 kind. When you are recording how long it takes OVN to complete your configuration to all nodes, other - // transactions may have occurred which may increases the overall time. You may also get longer processing times if one - // or more nodes are unavailable because we are measuring how long the functionality takes to apply to ALL nodes. - - // ** configuration duration recorder - How the duration of the config is measured within OVN ** - // We increment the nb_cfg integer value in the NB_Global table. - // ovn-northd notices the nb_cfg change and copies the nb_cfg value to SB_Global table field nb_cfg along with any - // other configuration that is changed in OVN Northbound database. - // All ovn-controllers detect nb_cfg value change and generate a 'barrier' on the openflow connection to the - // nodes ovs-vswitchd. Once ovn-controllers receive the 'barrier processed' reply from ovs-vswitchd which - // indicates that all relevant openflow operations associated with NB_Globals nb_cfg value have been - // propagated to the nodes OVS, it copies the SB_Global nb_cfg value to its Chassis_Private table nb_cfg record. - // ovn-northd detects changes to the Chassis_Private startRecords and computes the minimum nb_cfg for all Chassis_Private - // nb_cfg and stores this in NB_Global hv_cfg field along with a timestamp to field hv_cfg_timestamp which - // reflects the time when the slowest chassis catches up with the northbound configuration. 
- configDurationRegOnce.Do(func() { - prometheus.MustRegister(metricNetworkProgramming) - prometheus.MustRegister(metricNetworkProgrammingOVN) - }) - - cr.measurements = make(map[string]measurement) - // watch node count and adjust measurement rate if node count changes - cr.runMeasurementRateAdjuster(kube, k, time.Hour, stop) - // we currently do not clean the following channels up upon exit - cr.triggerProcessCh = make(chan string, processChSize) - updateOVNMeasurementCh := make(chan hvCfgUpdate, updateOVNMeasurementChSize) - deleteOVNMeasurementCh := make(chan int, deleteOVNMeasurementChSize) - go cr.processMeasurements(workerLoopPeriod, updateOVNMeasurementCh, deleteOVNMeasurementCh, stop) - - nbClient.Cache().AddEventHandler(&cache.EventHandlerFuncs{ - UpdateFunc: func(table string, old model.Model, new model.Model) { - if table != nbGlobalTable { - return - } - oldRow := old.(*nbdb.NBGlobal) - newRow := new.(*nbdb.NBGlobal) - - if oldRow.HvCfg != newRow.HvCfg && oldRow.HvCfgTimestamp != newRow.HvCfgTimestamp && newRow.HvCfgTimestamp > 0 { - select { - case updateOVNMeasurementCh <- hvCfgUpdate{hvCfg: newRow.HvCfg, timestamp: newRow.HvCfgTimestamp}: - default: - klog.Warning("Config duration recorder: unable to update OVN measurement") - select { - case deleteOVNMeasurementCh <- newRow.HvCfg: - default: - } - } - } - }, - }) - cr.enabled = true -} - -// Start allows the caller to attempt measurement of a control plane configuration duration, as a metric, -// the duration between functions Start and End. Optionally, if you wish to record OVN config duration, -// call AddOVN which will add the duration for OVN to apply the configuration to all nodes. -// The caller must pass kind,namespace,name which will be used to determine if the object -// is allowed to record. To allow no locking, each go routine that calls this function, can determine itself -// if it is allowed to measure. -// There is a mandatory two-step process to complete a measurement. 
-// Step 1) Call Start when you wish to begin a measurement - ideally when processing for the object starts -// Step 2) Call End which will complete a measurement -// Optionally, call AddOVN when you are making a transaction to OVN in order to add on the OVN duration to an existing -// measurement. This must be called between Start and End. Not every call to Start will result in a measurement -// and the rate of measurements depends on the number of nodes and function Run arg k. -// Only one measurement for a kind/namespace/name is allowed until the current measurement is Ended (via End) and -// processed. This is guaranteed by workqueues (even with multiple workers) and informer event handlers. -func (cr *ConfigDurationRecorder) Start(kind, namespace, name string) (time.Time, bool) { - if !cr.enabled { - return time.Time{}, false - } - kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) - if !cr.allowedToMeasure(kindNamespaceName) { - return time.Time{}, false - } - measurementTimestamp := time.Now() - cr.measurementsMu.Lock() - _, found := cr.measurements[kindNamespaceName] - // we only record for measurements that aren't in-progress - if !found { - cr.measurements[kindNamespaceName] = measurement{kind: kind, startTimestamp: measurementTimestamp, - expiresAt: measurementTimestamp.Add(maxMeasurementLifetime)} - } - cr.measurementsMu.Unlock() - return measurementTimestamp, !found -} - -// allowedToMeasure determines if we are allowed to measure or not. To avoid the cost of synchronisation by using locks, -// we use probability. For a value of kindNamespaceName that returns true, it will always return true. 
-func (cr *ConfigDurationRecorder) allowedToMeasure(kindNamespaceName string) bool { - if cr.measurementRate == 0 { - return true - } - // 1 in measurementRate chance of true - if hashToNumber(kindNamespaceName)%cr.measurementRate == 0 { - return true - } - return false -} - -func (cr *ConfigDurationRecorder) End(kind, namespace, name string) time.Time { - if !cr.enabled { - return time.Time{} - } - kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) - if !cr.allowedToMeasure(kindNamespaceName) { - return time.Time{} - } - measurementTimestamp := time.Now() - cr.measurementsMu.Lock() - if m, ok := cr.measurements[kindNamespaceName]; ok { - if !m.end { - m.end = true - m.endTimestamp = measurementTimestamp - cr.measurements[kindNamespaceName] = m - // if there are no OVN measurements, trigger immediate processing - if len(m.ovnMeasurements) == 0 { - select { - case cr.triggerProcessCh <- kindNamespaceName: - default: - // doesn't matter if channel is full because the measurement will be processed later anyway - } - } - } - } else { - // This can happen if Start was rejected for a resource because a measurement was in-progress for this - // kind/namespace/name, but during execution of this resource, the measurement was completed and now no record - // is found. - measurementTimestamp = time.Time{} - } - cr.measurementsMu.Unlock() - return measurementTimestamp -} - -// AddOVN adds OVN config duration to an existing recording - previously started by calling function Start -// It will return ovsdb operations which a user can add to existing operations they wish to track. -// Upon successful transaction of the operations to the ovsdb server, the user of this function must call a call-back -// function to lock-in the request to measure and report. Failure to call the call-back function, will result in no OVN -// measurement and no metrics are reported. AddOVN will result in a no-op if Start isn't called previously for the same -// kind/namespace/name. 
-// If multiple AddOVN is called between Start and End for the same kind/namespace/name, then the -// OVN durations will be summed and added to the total. There is an assumption that processing of kind/namespace/name is -// sequential -func (cr *ConfigDurationRecorder) AddOVN(nbClient libovsdbclient.Client, kind, namespace, name string) ( - []ovsdb.Operation, func(), time.Time, error) { - if !cr.enabled { - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) - if !cr.allowedToMeasure(kindNamespaceName) { - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - cr.measurementsMu.RLock() - m, ok := cr.measurements[kindNamespaceName] - cr.measurementsMu.RUnlock() - if !ok { - // no measurement found, therefore no-op - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - if m.end { - // existing measurement in-progress and not processed yet, therefore no-op - return []ovsdb.Operation{}, func() {}, time.Time{}, nil - } - nbGlobal := &nbdb.NBGlobal{} - nbGlobal, err := libovsdbops.GetNBGlobal(nbClient, nbGlobal) - if err != nil { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to find OVN Northbound NB_Global table"+ - " entry: %v", err) - } - if nbGlobal.NbCfg < 0 { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("nb_cfg is negative, failed to add OVN measurement") - } - //stop recording if we are close to overflow - if nbGlobal.NbCfg > maxNbCfg { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("unable to measure OVN due to nb_cfg being close to overflow") - } - ops, err := nbClient.Where(nbGlobal).Mutate(nbGlobal, model.Mutation{ - Field: &nbGlobal.NbCfg, - Mutator: ovsdb.MutateOperationAdd, - Value: 1, - }) - if err != nil { - return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to create update operation: %v", err) - } - ovnStartTimestamp := time.Now() - - return ops, func() { - // there can be a 
race condition here where we queue the wrong nbCfg value, but it is ok as long as it is - // less than or equal the hv_cfg value we see and this is the case because of atomic increments for nb_cfg - cr.measurementsMu.Lock() - m, ok = cr.measurements[kindNamespaceName] - if !ok { - klog.Errorf("Config duration recorder: expected a measurement entry. Call Start before AddOVN"+ - " for %s", kindNamespaceName) - cr.measurementsMu.Unlock() - return - } - m.ovnMeasurements = append(m.ovnMeasurements, ovnMeasurement{startTimestamp: ovnStartTimestamp, - nbCfg: nbGlobal.NbCfg + 1}) - cr.measurements[kindNamespaceName] = m - cr.measurementsMu.Unlock() - }, ovnStartTimestamp, nil -} - -// runMeasurementRateAdjuster will adjust the rate of measurements based on the number of nodes in the cluster and arg k -func (cr *ConfigDurationRecorder) runMeasurementRateAdjuster(kube kube.Interface, k float64, nodeCheckPeriod time.Duration, - stop <-chan struct{}) { - var currentMeasurementRate, newMeasurementRate uint64 - - updateMeasurementRate := func() { - if nodeCount, err := getNodeCount(kube); err != nil { - klog.Errorf("Config duration recorder: failed to update ticker duration considering node count: %v", err) - } else { - newMeasurementRate = uint64(math.Round(k * float64(nodeCount))) - if newMeasurementRate != currentMeasurementRate { - if newMeasurementRate > 0 { - currentMeasurementRate = newMeasurementRate - cr.measurementRate = newMeasurementRate - } - klog.V(5).Infof("Config duration recorder: updated measurement rate to approx 1 in"+ - " every %d requests", newMeasurementRate) - } - } - } - - // initial measurement rate adjustment - updateMeasurementRate() - - go func() { - nodeCheckTicker := time.NewTicker(nodeCheckPeriod) - for { - select { - case <-nodeCheckTicker.C: - updateMeasurementRate() - case <-stop: - nodeCheckTicker.Stop() - return - } - } - }() -} - -// processMeasurements manages the measurements map. 
It calculates metrics and cleans up finished or stale measurements -func (cr *ConfigDurationRecorder) processMeasurements(period time.Duration, updateOVNMeasurementCh chan hvCfgUpdate, - deleteOVNMeasurementCh chan int, stop <-chan struct{}) { - ticker := time.NewTicker(period) - var ovnKDelta, ovnDelta float64 - - for { - select { - case <-stop: - ticker.Stop() - return - // remove measurements if channel updateOVNMeasurementCh overflows, therefore we cannot trust existing measurements - case hvCfg := <-deleteOVNMeasurementCh: - cr.measurementsMu.Lock() - removeOVNMeasurements(cr.measurements, hvCfg) - cr.measurementsMu.Unlock() - case h := <-updateOVNMeasurementCh: - cr.measurementsMu.Lock() - cr.addHvCfg(h.hvCfg, h.timestamp) - cr.measurementsMu.Unlock() - // used for processing measurements that didn't require OVN measurement. Helps to keep measurement map small - case kindNamespaceName := <-cr.triggerProcessCh: - cr.measurementsMu.Lock() - m, ok := cr.measurements[kindNamespaceName] - if !ok { - klog.Errorf("Config duration recorder: expected measurement, but not found") - cr.measurementsMu.Unlock() - continue - } - if !m.end { - cr.measurementsMu.Unlock() - continue - } - if len(m.ovnMeasurements) != 0 { - cr.measurementsMu.Unlock() - continue - } - ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() - metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) - klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took %v"+ - " seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) - delete(cr.measurements, kindNamespaceName) - cr.measurementsMu.Unlock() - // used for processing measurements that require OVN measurement or do not or are expired. 
- case <-ticker.C: - start := time.Now() - cr.measurementsMu.Lock() - // process and clean up measurements - for kindNamespaceName, m := range cr.measurements { - if start.After(m.expiresAt) { - // measurement may expire if OVN is degraded or End wasn't called - klog.Warningf("Config duration recorder: measurement expired for %s", kindNamespaceName) - delete(cr.measurements, kindNamespaceName) - continue - } - if !m.end { - // measurement didn't end yet, process later - continue - } - // for when no ovn measurements requested - if len(m.ovnMeasurements) == 0 { - ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() - metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) - klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller"+ - " took %v seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) - delete(cr.measurements, kindNamespaceName) - continue - } - // for each kind/namespace/name, there can be multiple calls to AddOVN between start and end - // we sum all the OVN durations and add it to the start and end duration - // first lets make sure all OVN measurements are finished - if complete := allOVNMeasurementsComplete(m.ovnMeasurements); !complete { - continue - } - - ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() - ovnDelta = calculateOVNDuration(m.ovnMeasurements) - metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta + ovnDelta) - metricNetworkProgrammingOVN.Observe(ovnDelta) - klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took"+ - " %v seconds. OVN took %v seconds. 
Total took %v seconds", kindNamespaceName, ovnKDelta, - ovnDelta, ovnDelta+ovnKDelta) - delete(cr.measurements, kindNamespaceName) - } - cr.measurementsMu.Unlock() - } - } -} - -func (cr *ConfigDurationRecorder) addHvCfg(hvCfg, hvCfgTimestamp int) { - var altered bool - for i, m := range cr.measurements { - altered = false - for iOvnM, ovnM := range m.ovnMeasurements { - if ovnM.complete { - continue - } - if ovnM.nbCfg <= hvCfg { - ovnM.endTimestamp = time.UnixMilli(int64(hvCfgTimestamp)) - ovnM.complete = true - m.ovnMeasurements[iOvnM] = ovnM - altered = true - } - } - if altered { - cr.measurements[i] = m - } - } -} - -// removeOVNMeasurements remove any OVN measurements less than or equal argument hvCfg -func removeOVNMeasurements(measurements map[string]measurement, hvCfg int) { - for kindNamespaceName, m := range measurements { - var indexToDelete []int - for i, ovnM := range m.ovnMeasurements { - if ovnM.nbCfg <= hvCfg { - indexToDelete = append(indexToDelete, i) - } - } - if len(indexToDelete) == 0 { - continue - } - if len(indexToDelete) == len(m.ovnMeasurements) { - delete(measurements, kindNamespaceName) - } - for _, iDel := range indexToDelete { - m.ovnMeasurements = removeOVNMeasurement(m.ovnMeasurements, iDel) - } - measurements[kindNamespaceName] = m - } -} - -func removeOVNMeasurement(oM []ovnMeasurement, i int) []ovnMeasurement { - oM[i] = oM[len(oM)-1] - return oM[:len(oM)-1] -} -func hashToNumber(s string) uint64 { - h := fnv.New64() - h.Write([]byte(s)) - return h.Sum64() -} - -func calculateOVNDuration(ovnMeasurements []ovnMeasurement) float64 { - var totalDuration float64 - for _, oM := range ovnMeasurements { - if !oM.complete { - continue - } - totalDuration += oM.endTimestamp.Sub(oM.startTimestamp).Seconds() - } - return totalDuration -} - -func allOVNMeasurementsComplete(ovnMeasurements []ovnMeasurement) bool { - for _, oM := range ovnMeasurements { - if !oM.complete { - return false - } - } - return true -} - -// merge direct copy from 
k8 pkg/proxy/metrics/metrics.go -func merge(slices ...[]float64) []float64 { - result := make([]float64, 1) - for _, s := range slices { - result = append(result, s...) - } - return result -} - -func getNodeCount(kube kube.Interface) (int, error) { - nodes, err := kube.GetNodes() - if err != nil { - return 0, fmt.Errorf("unable to retrieve node list: %v", err) - } - return len(nodes), nil -} - // setNbE2eTimestamp return true if setting timestamp to NB global options is successful func setNbE2eTimestamp(ovnNBClient libovsdbclient.Client, timestamp int64) bool { // assumption that only first row is relevant in NB_Global table diff --git a/go-controller/pkg/metrics/ovs.go b/go-controller/pkg/metrics/ovs.go index b2cc1403a0..718fa031e7 100644 --- a/go-controller/pkg/metrics/ovs.go +++ b/go-controller/pkg/metrics/ovs.go @@ -14,10 +14,11 @@ import ( "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" ovsops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops/ovs" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -27,15 +28,15 @@ var ( // ovs datapath Metrics var metricOvsDpTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_total", Help: "Represents total number of datapaths on the system.", }) var metricOvsDp = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp", Help: "A metric with a constant '1' value labeled by datapath " + "name present on the instance."}, @@ -46,8 +47,8 @@ var metricOvsDp = 
prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpIfTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_if_total", Help: "Represents the number of ports connected to the datapath."}, []string{ @@ -56,8 +57,8 @@ var metricOvsDpIfTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_total", Help: "Represents the number of flows in datapath."}, []string{ @@ -66,8 +67,8 @@ var metricOvsDpFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsLookupHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_lookup_hit", Help: "Represents number of packets matching the existing flows " + "while processing incoming packets in the datapath."}, @@ -77,8 +78,8 @@ var metricOvsDpFlowsLookupHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsLookupMissed = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_lookup_missed", Help: "Represents the number of packets not matching any existing " + "flow and require user space processing."}, @@ -88,8 +89,8 @@ var metricOvsDpFlowsLookupMissed = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpFlowsLookupLost = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: 
MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_flows_lookup_lost", Help: "number of packets destined for user space process but " + "subsequently dropped before reaching userspace."}, @@ -99,8 +100,8 @@ var metricOvsDpFlowsLookupLost = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpPacketsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_packets_total", Help: "Represents the total number of packets datapath processed " + "which is the sum of hit and missed."}, @@ -110,8 +111,8 @@ var metricOvsDpPacketsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsdpMasksHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_masks_hit", Help: "Represents the total number of masks visited for matching incoming packets.", }, @@ -121,8 +122,8 @@ var metricOvsdpMasksHit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpMasksTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_masks_total", Help: "Represents the number of masks in a datapath."}, []string{ @@ -131,8 +132,8 @@ var metricOvsDpMasksTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsDpMasksHitRatio = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "dp_masks_hit_ratio", Help: "Represents the average number of masks visited 
per packet " + "the ratio between hit and total number of packets processed by the datapath."}, @@ -143,16 +144,16 @@ var metricOvsDpMasksHitRatio = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // ovs bridge statistics & attributes metrics var metricOvsBridgeTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge_total", Help: "Represents total number of OVS bridges on the system.", }, ) var metricOvsBridge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge", Help: "A metric with a constant '1' value labeled by bridge name " + "present on the instance."}, @@ -162,8 +163,8 @@ var metricOvsBridge = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsBridgePortsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge_ports_total", Help: "Represents the number of OVS ports on the bridge."}, []string{ @@ -172,8 +173,8 @@ var metricOvsBridgePortsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ ) var metricOvsBridgeFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "bridge_flows_total", Help: "Represents the number of OpenFlow flows on the OVS bridge."}, []string{ @@ -183,57 +184,57 @@ var metricOvsBridgeFlowsTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{ // ovs interface metrics var metricOvsInterfaceResetsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: 
MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_resets_total", Help: "The number of link state changes observed by Open vSwitch interface(s).", }) var metricOvsInterfaceRxDroppedTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_rx_dropped_total", Help: "The total number of received packets dropped by Open vSwitch interface(s).", }) var metricOvsInterfaceTxDroppedTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_tx_dropped_total", Help: "The total number of transmitted packets dropped by Open vSwitch interface(s).", }) var metricOvsInterfaceRxErrorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_rx_errors_total", Help: "The total number of received packets with errors by Open vSwitch interface(s).", }) var metricOvsInterfaceTxErrorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_tx_errors_total", Help: "The total number of transmitted packets with errors by Open vSwitch interface(s).", }) var metricOvsInterfaceCollisionsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: 
"interface_collisions_total", Help: "The total number of packet collisions transmitted by Open vSwitch interface(s).", }) var metricOvsInterfaceTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interfaces_total", Help: "The total number of Open vSwitch interface(s) created for pods", }) var MetricOvsInterfaceUpWait = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "interface_up_wait_seconds_total", Help: "The total number of seconds that is required to wait for pod " + "Open vSwitch interface until its available", @@ -241,16 +242,16 @@ var MetricOvsInterfaceUpWait = prometheus.NewCounter(prometheus.CounterOpts{ // ovs memory metrics var metricOvsHandlersTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "handlers_total", Help: "Represents the number of handlers thread. This thread reads upcalls from dpif, " + "forwards each upcall's packet and possibly sets up a kernel flow as a cache.", }) var metricOvsRevalidatorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "revalidators_total", Help: "Represents the number of revalidators thread. 
This thread processes datapath flows, " + "updates OpenFlow statistics, and updates or removes them if necessary.", @@ -258,16 +259,16 @@ var metricOvsRevalidatorsTotal = prometheus.NewGauge(prometheus.GaugeOpts{ // ovs Hw offload metrics var metricOvsHwOffload = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "hw_offload", Help: "Represents whether netdev flow offload to hardware is enabled " + "or not -- false(0) and true(1).", }) var metricOvsTcPolicy = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, - Subsystem: MetricOvsSubsystemVswitchd, + Namespace: types.MetricOvsNamespace, + Subsystem: types.MetricOvsSubsystemVswitchd, Name: "tc_policy", Help: "Represents the policy used with HW offloading " + "-- none(0), skip_sw(1), and skip_hw(2).", @@ -310,15 +311,15 @@ func ovsDatapathLookupsMetrics(output, datapath string) { } switch elem[0] { case "hit": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_lookup_hit", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_lookup_hit", elem[1]) datapathPacketsTotal += value metricOvsDpFlowsLookupHit.WithLabelValues(datapath).Set(value) case "missed": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_lookup_missed", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_lookup_missed", elem[1]) datapathPacketsTotal += value metricOvsDpFlowsLookupMissed.WithLabelValues(datapath).Set(value) case "lost": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_lookup_lost", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_lookup_lost", elem[1]) metricOvsDpFlowsLookupLost.WithLabelValues(datapath).Set(value) } } @@ -335,13 +336,13 @@ func ovsDatapathMasksMetrics(output, datapath string) { } switch elem[0] 
{ case "hit": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_masks_hit", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_masks_hit", elem[1]) metricOvsdpMasksHit.WithLabelValues(datapath).Set(value) case "total": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_masks_total", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_masks_total", elem[1]) metricOvsDpMasksTotal.WithLabelValues(datapath).Set(value) case "hit/pkt": - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_masks_hit_ratio", elem[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_masks_hit_ratio", elem[1]) metricOvsDpMasksHitRatio.WithLabelValues(datapath).Set(value) } } @@ -419,7 +420,7 @@ func setOvsDatapathMetrics(ovsAppctl ovsClient, datapaths []string) (err error) datapathPortCount++ } else if strings.HasPrefix(output, "flows:") { flowFields := strings.Fields(output) - value := parseMetricToFloat(MetricOvsSubsystemVswitchd, "dp_flows_total", flowFields[1]) + value := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "dp_flows_total", flowFields[1]) metricOvsDpFlowsTotal.WithLabelValues(datapathName).Set(value) } } @@ -504,7 +505,7 @@ func getOvsBridgeOpenFlowsCount(ovsOfctl ovsClient, bridgeName string) (float64, if strings.HasPrefix(kvPair, "flow_count=") { value := strings.Split(kvPair, "=")[1] metricName := bridgeName + "flows_total" - return parseMetricToFloat(MetricOvsSubsystemVswitchd, metricName, value), nil + return parseMetricToFloat(types.MetricOvsSubsystemVswitchd, metricName, value), nil } } return 0, fmt.Errorf("ovs-ofctl dump-aggregate %s output didn't contain "+ @@ -595,11 +596,11 @@ func setOvsMemoryMetrics(ovsVswitchdAppctl ovsClient) (err error) { for _, kvPair := range strings.Fields(stdout) { if strings.HasPrefix(kvPair, "handlers:") { value := strings.Split(kvPair, ":")[1] - count := parseMetricToFloat(MetricOvsSubsystemVswitchd, "handlers_total", value) + 
count := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "handlers_total", value) metricOvsHandlersTotal.Set(count) } else if strings.HasPrefix(kvPair, "revalidators:") { value := strings.Split(kvPair, ":")[1] - count := parseMetricToFloat(MetricOvsSubsystemVswitchd, "revalidators_total", value) + count := parseMetricToFloat(types.MetricOvsSubsystemVswitchd, "revalidators_total", value) metricOvsRevalidatorsTotal.Set(count) } } @@ -846,7 +847,7 @@ func registerOvsMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval getOvsVersionInfo(ovsDBClient) registry.MustRegister(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ - Namespace: MetricOvsNamespace, + Namespace: types.MetricOvsNamespace, Name: "build_info", Help: "A metric with a constant '1' value labeled by ovs version.", ConstLabels: prometheus.Labels{ @@ -890,18 +891,18 @@ func registerOvsMetrics(ovsDBClient libovsdbclient.Client, metricsScrapeInterval registry.MustRegister(MetricOvsInterfaceUpWait) // Register the OVS coverage/show metrics componentCoverageShowMetricsMap[ovsVswitchd] = ovsVswitchdCoverageShowMetricsMap - registerCoverageShowMetrics(ovsVswitchd, MetricOvsNamespace, MetricOvsSubsystemVswitchd) + registerCoverageShowMetrics(ovsVswitchd, types.MetricOvsNamespace, types.MetricOvsSubsystemVswitchd) // When ovnkube-node is running in privileged mode, the hostPID will be set to true, // and therefore it can monitor OVS running on the host using PID. 
if !config.UnprivilegedMode { registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{ PidFn: prometheus.NewPidFileFn("/var/run/openvswitch/ovs-vswitchd.pid"), - Namespace: fmt.Sprintf("%s_%s", MetricOvsNamespace, MetricOvsSubsystemVswitchd), + Namespace: fmt.Sprintf("%s_%s", types.MetricOvsNamespace, types.MetricOvsSubsystemVswitchd), })) registry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{ PidFn: prometheus.NewPidFileFn("/var/run/openvswitch/ovsdb-server.pid"), - Namespace: fmt.Sprintf("%s_%s", MetricOvsNamespace, MetricOvsSubsystemDB), + Namespace: fmt.Sprintf("%s_%s", types.MetricOvsNamespace, types.MetricOvsSubsystemDB), })) } diff --git a/go-controller/pkg/metrics/recorders/duration.go b/go-controller/pkg/metrics/recorders/duration.go new file mode 100644 index 0000000000..4376283c20 --- /dev/null +++ b/go-controller/pkg/metrics/recorders/duration.go @@ -0,0 +1,565 @@ +package recorders + +import ( + "fmt" + "hash/fnv" + "math" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog/v2" + + "github.com/ovn-kubernetes/libovsdb/cache" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +const ( + updateOVNMeasurementChSize = 500 + deleteOVNMeasurementChSize = 50 + processChSize = 1000 + nbGlobalTable = "NB_Global" + //fixme: remove when bug is fixed in OVN (Red Hat bugzilla bug number 2074019). Also, handle overflow event. 
+ maxNbCfg = math.MaxUint32 - 1000 + maxMeasurementLifetime = 20 * time.Minute +) + +var configDurationRegOnce sync.Once + +type ConfigDurationRecorder struct { + // rate at which measurements are allowed. Probabilistically, 1 in every measurementRate + measurementRate uint64 + measurements map[string]measurement + // controls RW access to measurements map + measurementsMu sync.RWMutex + // channel to trigger processing a measurement following call to End func. Channel string is kind/namespace/name + triggerProcessCh chan string + enabled bool +} + +type ovnMeasurement struct { + // time just before ovsdb tx is called + startTimestamp time.Time + // time when the nbCfg value and its associated configuration is applied to all nodes + endTimestamp time.Time + // OVN measurement complete - start and end timestamps are valid + complete bool + // nb_cfg value that started the measurement + nbCfg int +} + +// measurement stores a measurement attempt through OVN-Kubernetes controller and optionally OVN +type measurement struct { + // kubernetes kind e.g. pod or service + kind string + // time when Add is executed + startTimestamp time.Time + // time when End is executed + endTimestamp time.Time + // if true endTimestamp is valid + end bool + // time when this measurement expires. 
Set during Add + expiresAt time.Time + // OVN measurement(s) via AddOVN + ovnMeasurements []ovnMeasurement +} + +// hvCfgUpdate holds the information received from OVN Northbound event handler +type hvCfgUpdate struct { + // timestamp is in milliseconds + timestamp int + hvCfg int +} + +// global variable is needed because this functionality is accessed in many functions +var cdr *ConfigDurationRecorder + +// lock for accessing the cdr global variable +var cdrMutex sync.Mutex + +func GetConfigDurationRecorder() *ConfigDurationRecorder { + cdrMutex.Lock() + defer cdrMutex.Unlock() + if cdr == nil { + cdr = &ConfigDurationRecorder{} + } + return cdr +} + +// removeOVNMeasurements remove any OVN measurements less than or equal argument hvCfg +func removeOVNMeasurements(measurements map[string]measurement, hvCfg int) { + for kindNamespaceName, m := range measurements { + var indexToDelete []int + for i, ovnM := range m.ovnMeasurements { + if ovnM.nbCfg <= hvCfg { + indexToDelete = append(indexToDelete, i) + } + } + if len(indexToDelete) == 0 { + continue + } + if len(indexToDelete) == len(m.ovnMeasurements) { + delete(measurements, kindNamespaceName) + } + for _, iDel := range indexToDelete { + m.ovnMeasurements = removeOVNMeasurement(m.ovnMeasurements, iDel) + } + measurements[kindNamespaceName] = m + } +} + +var metricNetworkProgramming prometheus.ObserverVec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, + Name: "network_programming_duration_seconds", + Help: "The duration to apply network configuration for a kind (e.g. pod, service, networkpolicy). " + + "Configuration includes add, update and delete events for each kind.", + Buckets: merge( + prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s + prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s + prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 
115s + prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min + []string{ + "kind", + }) + +var metricNetworkProgrammingOVN = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, + Name: "network_programming_ovn_duration_seconds", + Help: "The duration for OVN to apply network configuration", + Buckets: merge( + prometheus.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s + prometheus.LinearBuckets(1, 1, 59), // 1s, 2s, 3s, ... 59s + prometheus.LinearBuckets(60, 5, 12), // 60s, 65s, 70s, ... 115s + prometheus.LinearBuckets(120, 30, 11))}, // 2min, 2.5min, 3min, ..., 7min +) + +// Run monitors the config duration for OVN-Kube master to configure k8 kinds. A measurement maybe allowed and this is +// related to the number of k8 nodes, N [1] and by argument k [2] where there is a probability that 1 out of N*k +// measurement attempts are allowed. If k=0, all measurements are allowed. mUpdatePeriod determines the period to +// process and publish metrics +// [1] 1 0. The measurement rate is proportional to + // the number of nodes, N and argument k. 1 out of every N*k attempted measurements will succeed. + + // For the optional OVN measurement by calling AddOVN, when the CMS is about to make a transaction to configure + // whatever kind, a call to AddOVN function allows the caller to measure OVN duration. + // An ovsdb operation is returned to the caller of AddOVN, which they can bundle with their existing transactions + // sent to OVN which will tell OVN to measure how long it takes to configure all nodes with the config in the transaction. + // Config duration then waits for OVN to configure all nodes and calculates the time delta. + + // ** configuration duration recorder - caveats ** + // For the optional OVN recording, it does not give you an exact time duration for how long it takes to configure your + // k8 kind. 
When you are recording how long it takes OVN to complete your configuration to all nodes, other + // transactions may have occurred which may increases the overall time. You may also get longer processing times if one + // or more nodes are unavailable because we are measuring how long the functionality takes to apply to ALL nodes. + + // ** configuration duration recorder - How the duration of the config is measured within OVN ** + // We increment the nb_cfg integer value in the NB_Global table. + // ovn-northd notices the nb_cfg change and copies the nb_cfg value to SB_Global table field nb_cfg along with any + // other configuration that is changed in OVN Northbound database. + // All ovn-controllers detect nb_cfg value change and generate a 'barrier' on the openflow connection to the + // nodes ovs-vswitchd. Once ovn-controllers receive the 'barrier processed' reply from ovs-vswitchd which + // indicates that all relevant openflow operations associated with NB_Globals nb_cfg value have been + // propagated to the nodes OVS, it copies the SB_Global nb_cfg value to its Chassis_Private table nb_cfg record. + // ovn-northd detects changes to the Chassis_Private startRecords and computes the minimum nb_cfg for all Chassis_Private + // nb_cfg and stores this in NB_Global hv_cfg field along with a timestamp to field hv_cfg_timestamp which + // reflects the time when the slowest chassis catches up with the northbound configuration. 
+ configDurationRegOnce.Do(func() { + prometheus.MustRegister(metricNetworkProgramming) + prometheus.MustRegister(metricNetworkProgrammingOVN) + }) + + cr.measurements = make(map[string]measurement) + // watch node count and adjust measurement rate if node count changes + cr.runMeasurementRateAdjuster(wf, k, time.Hour, stop) + // we currently do not clean the following channels up upon exit + cr.triggerProcessCh = make(chan string, processChSize) + updateOVNMeasurementCh := make(chan hvCfgUpdate, updateOVNMeasurementChSize) + deleteOVNMeasurementCh := make(chan int, deleteOVNMeasurementChSize) + go cr.processMeasurements(workerLoopPeriod, updateOVNMeasurementCh, deleteOVNMeasurementCh, stop) + + nbClient.Cache().AddEventHandler(&cache.EventHandlerFuncs{ + UpdateFunc: func(table string, old model.Model, new model.Model) { + if table != nbGlobalTable { + return + } + oldRow := old.(*nbdb.NBGlobal) + newRow := new.(*nbdb.NBGlobal) + + if oldRow.HvCfg != newRow.HvCfg && oldRow.HvCfgTimestamp != newRow.HvCfgTimestamp && newRow.HvCfgTimestamp > 0 { + select { + case updateOVNMeasurementCh <- hvCfgUpdate{hvCfg: newRow.HvCfg, timestamp: newRow.HvCfgTimestamp}: + default: + klog.Warning("Config duration recorder: unable to update OVN measurement") + select { + case deleteOVNMeasurementCh <- newRow.HvCfg: + default: + } + } + } + }, + }) + cr.enabled = true +} + +// Start allows the caller to attempt measurement of a control plane configuration duration, as a metric, +// the duration between functions Start and End. Optionally, if you wish to record OVN config duration, +// call AddOVN which will add the duration for OVN to apply the configuration to all nodes. +// The caller must pass kind,namespace,name which will be used to determine if the object +// is allowed to record. To allow no locking, each go routine that calls this function, can determine itself +// if it is allowed to measure. +// There is a mandatory two-step process to complete a measurement. 
+// Step 1) Call Start when you wish to begin a measurement - ideally when processing for the object starts +// Step 2) Call End which will complete a measurement +// Optionally, call AddOVN when you are making a transaction to OVN in order to add on the OVN duration to an existing +// measurement. This must be called between Start and End. Not every call to Start will result in a measurement +// and the rate of measurements depends on the number of nodes and function Run arg k. +// Only one measurement for a kind/namespace/name is allowed until the current measurement is Ended (via End) and +// processed. This is guaranteed by workqueues (even with multiple workers) and informer event handlers. +func (cr *ConfigDurationRecorder) Start(kind, namespace, name string) (time.Time, bool) { + if !cr.enabled { + return time.Time{}, false + } + kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) + if !cr.allowedToMeasure(kindNamespaceName) { + return time.Time{}, false + } + measurementTimestamp := time.Now() + cr.measurementsMu.Lock() + _, found := cr.measurements[kindNamespaceName] + // we only record for measurements that aren't in-progress + if !found { + cr.measurements[kindNamespaceName] = measurement{kind: kind, startTimestamp: measurementTimestamp, + expiresAt: measurementTimestamp.Add(maxMeasurementLifetime)} + } + cr.measurementsMu.Unlock() + return measurementTimestamp, !found +} + +// allowedToMeasure determines if we are allowed to measure or not. To avoid the cost of synchronisation by using locks, +// we use probability. For a value of kindNamespaceName that returns true, it will always return true. 
+func (cr *ConfigDurationRecorder) allowedToMeasure(kindNamespaceName string) bool { + if cr.measurementRate == 0 { + return true + } + // 1 in measurementRate chance of true + if hashToNumber(kindNamespaceName)%cr.measurementRate == 0 { + return true + } + return false +} + +func (cr *ConfigDurationRecorder) End(kind, namespace, name string) time.Time { + if !cr.enabled { + return time.Time{} + } + kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) + if !cr.allowedToMeasure(kindNamespaceName) { + return time.Time{} + } + measurementTimestamp := time.Now() + cr.measurementsMu.Lock() + if m, ok := cr.measurements[kindNamespaceName]; ok { + if !m.end { + m.end = true + m.endTimestamp = measurementTimestamp + cr.measurements[kindNamespaceName] = m + // if there are no OVN measurements, trigger immediate processing + if len(m.ovnMeasurements) == 0 { + select { + case cr.triggerProcessCh <- kindNamespaceName: + default: + // doesn't matter if channel is full because the measurement will be processed later anyway + } + } + } + } else { + // This can happen if Start was rejected for a resource because a measurement was in-progress for this + // kind/namespace/name, but during execution of this resource, the measurement was completed and now no record + // is found. + measurementTimestamp = time.Time{} + } + cr.measurementsMu.Unlock() + return measurementTimestamp +} + +// AddOVN adds OVN config duration to an existing recording - previously started by calling function Start +// It will return ovsdb operations which a user can add to existing operations they wish to track. +// Upon successful transaction of the operations to the ovsdb server, the user of this function must call a call-back +// function to lock-in the request to measure and report. Failure to call the call-back function, will result in no OVN +// measurement and no metrics are reported. AddOVN will result in a no-op if Start isn't called previously for the same +// kind/namespace/name. 
+// If multiple AddOVN is called between Start and End for the same kind/namespace/name, then the +// OVN durations will be summed and added to the total. There is an assumption that processing of kind/namespace/name is +// sequential +func (cr *ConfigDurationRecorder) AddOVN(nbClient libovsdbclient.Client, kind, namespace, name string) ( + []ovsdb.Operation, func(), time.Time, error) { + if !cr.enabled { + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + kindNamespaceName := fmt.Sprintf("%s/%s/%s", kind, namespace, name) + if !cr.allowedToMeasure(kindNamespaceName) { + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + cr.measurementsMu.RLock() + m, ok := cr.measurements[kindNamespaceName] + cr.measurementsMu.RUnlock() + if !ok { + // no measurement found, therefore no-op + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + if m.end { + // existing measurement in-progress and not processed yet, therefore no-op + return []ovsdb.Operation{}, func() {}, time.Time{}, nil + } + nbGlobal := &nbdb.NBGlobal{} + nbGlobal, err := libovsdbops.GetNBGlobal(nbClient, nbGlobal) + if err != nil { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to find OVN Northbound NB_Global table"+ + " entry: %v", err) + } + if nbGlobal.NbCfg < 0 { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("nb_cfg is negative, failed to add OVN measurement") + } + //stop recording if we are close to overflow + if nbGlobal.NbCfg > maxNbCfg { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("unable to measure OVN due to nb_cfg being close to overflow") + } + ops, err := nbClient.Where(nbGlobal).Mutate(nbGlobal, model.Mutation{ + Field: &nbGlobal.NbCfg, + Mutator: ovsdb.MutateOperationAdd, + Value: 1, + }) + if err != nil { + return []ovsdb.Operation{}, func() {}, time.Time{}, fmt.Errorf("failed to create update operation: %v", err) + } + ovnStartTimestamp := time.Now() + + return ops, func() { + // there can be a 
race condition here where we queue the wrong nbCfg value, but it is ok as long as it is + // less than or equal the hv_cfg value we see and this is the case because of atomic increments for nb_cfg + cr.measurementsMu.Lock() + m, ok = cr.measurements[kindNamespaceName] + if !ok { + klog.Errorf("Config duration recorder: expected a measurement entry. Call Start before AddOVN"+ + " for %s", kindNamespaceName) + cr.measurementsMu.Unlock() + return + } + m.ovnMeasurements = append(m.ovnMeasurements, ovnMeasurement{startTimestamp: ovnStartTimestamp, + nbCfg: nbGlobal.NbCfg + 1}) + cr.measurements[kindNamespaceName] = m + cr.measurementsMu.Unlock() + }, ovnStartTimestamp, nil +} + +// runMeasurementRateAdjuster will adjust the rate of measurements based on the number of nodes in the cluster and arg k +func (cr *ConfigDurationRecorder) runMeasurementRateAdjuster(wf *factory.WatchFactory, k float64, nodeCheckPeriod time.Duration, + stop <-chan struct{}) { + var currentMeasurementRate, newMeasurementRate uint64 + + updateMeasurementRate := func() { + if nodeCount, err := getNodeCount(wf); err != nil { + klog.Errorf("Config duration recorder: failed to update ticker duration considering node count: %v", err) + } else { + newMeasurementRate = uint64(math.Round(k * float64(nodeCount))) + if newMeasurementRate != currentMeasurementRate { + if newMeasurementRate > 0 { + currentMeasurementRate = newMeasurementRate + cr.measurementRate = newMeasurementRate + } + klog.V(5).Infof("Config duration recorder: updated measurement rate to approx 1 in"+ + " every %d requests", newMeasurementRate) + } + } + } + + // initial measurement rate adjustment + updateMeasurementRate() + + go func() { + nodeCheckTicker := time.NewTicker(nodeCheckPeriod) + for { + select { + case <-nodeCheckTicker.C: + updateMeasurementRate() + case <-stop: + nodeCheckTicker.Stop() + return + } + } + }() +} + +// processMeasurements manages the measurements map. 
It calculates metrics and cleans up finished or stale measurements +func (cr *ConfigDurationRecorder) processMeasurements(period time.Duration, updateOVNMeasurementCh chan hvCfgUpdate, + deleteOVNMeasurementCh chan int, stop <-chan struct{}) { + ticker := time.NewTicker(period) + var ovnKDelta, ovnDelta float64 + + for { + select { + case <-stop: + ticker.Stop() + return + // remove measurements if channel updateOVNMeasurementCh overflows, therefore we cannot trust existing measurements + case hvCfg := <-deleteOVNMeasurementCh: + cr.measurementsMu.Lock() + removeOVNMeasurements(cr.measurements, hvCfg) + cr.measurementsMu.Unlock() + case h := <-updateOVNMeasurementCh: + cr.measurementsMu.Lock() + cr.addHvCfg(h.hvCfg, h.timestamp) + cr.measurementsMu.Unlock() + // used for processing measurements that didn't require OVN measurement. Helps to keep measurement map small + case kindNamespaceName := <-cr.triggerProcessCh: + cr.measurementsMu.Lock() + m, ok := cr.measurements[kindNamespaceName] + if !ok { + klog.Errorf("Config duration recorder: expected measurement, but not found") + cr.measurementsMu.Unlock() + continue + } + if !m.end { + cr.measurementsMu.Unlock() + continue + } + if len(m.ovnMeasurements) != 0 { + cr.measurementsMu.Unlock() + continue + } + ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() + metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) + klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took %v"+ + " seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) + delete(cr.measurements, kindNamespaceName) + cr.measurementsMu.Unlock() + // used for processing measurements that require OVN measurement or do not or are expired. 
+ case <-ticker.C: + start := time.Now() + cr.measurementsMu.Lock() + // process and clean up measurements + for kindNamespaceName, m := range cr.measurements { + if start.After(m.expiresAt) { + // measurement may expire if OVN is degraded or End wasn't called + klog.Warningf("Config duration recorder: measurement expired for %s", kindNamespaceName) + delete(cr.measurements, kindNamespaceName) + continue + } + if !m.end { + // measurement didn't end yet, process later + continue + } + // for when no ovn measurements requested + if len(m.ovnMeasurements) == 0 { + ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() + metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta) + klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller"+ + " took %v seconds. No OVN measurement.", kindNamespaceName, ovnKDelta) + delete(cr.measurements, kindNamespaceName) + continue + } + // for each kind/namespace/name, there can be multiple calls to AddOVN between start and end + // we sum all the OVN durations and add it to the start and end duration + // first lets make sure all OVN measurements are finished + if complete := allOVNMeasurementsComplete(m.ovnMeasurements); !complete { + continue + } + + ovnKDelta = m.endTimestamp.Sub(m.startTimestamp).Seconds() + ovnDelta = calculateOVNDuration(m.ovnMeasurements) + metricNetworkProgramming.With(prometheus.Labels{"kind": m.kind}).Observe(ovnKDelta + ovnDelta) + metricNetworkProgrammingOVN.Observe(ovnDelta) + klog.V(5).Infof("Config duration recorder: kind/namespace/name %s. OVN-Kubernetes controller took"+ + " %v seconds. OVN took %v seconds. 
Total took %v seconds", kindNamespaceName, ovnKDelta, + ovnDelta, ovnDelta+ovnKDelta) + delete(cr.measurements, kindNamespaceName) + } + cr.measurementsMu.Unlock() + } + } +} + +func (cr *ConfigDurationRecorder) addHvCfg(hvCfg, hvCfgTimestamp int) { + var altered bool + for i, m := range cr.measurements { + altered = false + for iOvnM, ovnM := range m.ovnMeasurements { + if ovnM.complete { + continue + } + if ovnM.nbCfg <= hvCfg { + ovnM.endTimestamp = time.UnixMilli(int64(hvCfgTimestamp)) + ovnM.complete = true + m.ovnMeasurements[iOvnM] = ovnM + altered = true + } + } + if altered { + cr.measurements[i] = m + } + } +} + +func getNodeCount(wf *factory.WatchFactory) (int, error) { + nodes, err := wf.GetNodes() + if err != nil { + return 0, fmt.Errorf("unable to retrieve node list: %v", err) + } + return len(nodes), nil +} + +func removeOVNMeasurement(oM []ovnMeasurement, i int) []ovnMeasurement { + oM[i] = oM[len(oM)-1] + return oM[:len(oM)-1] +} +func hashToNumber(s string) uint64 { + h := fnv.New64() + h.Write([]byte(s)) + return h.Sum64() +} + +func calculateOVNDuration(ovnMeasurements []ovnMeasurement) float64 { + var totalDuration float64 + for _, oM := range ovnMeasurements { + if !oM.complete { + continue + } + totalDuration += oM.endTimestamp.Sub(oM.startTimestamp).Seconds() + } + return totalDuration +} + +func allOVNMeasurementsComplete(ovnMeasurements []ovnMeasurement) bool { + for _, oM := range ovnMeasurements { + if !oM.complete { + return false + } + } + return true +} + +// merge direct copy from k8 pkg/proxy/metrics/metrics.go +func merge(slices ...[]float64) []float64 { + result := make([]float64, 1) + for _, s := range slices { + result = append(result, s...) 
+ } + return result +} diff --git a/go-controller/pkg/metrics/ovnkube_controller_test.go b/go-controller/pkg/metrics/recorders/duration_test.go similarity index 85% rename from go-controller/pkg/metrics/ovnkube_controller_test.go rename to go-controller/pkg/metrics/recorders/duration_test.go index 1ff008db59..2e725f99c3 100644 --- a/go-controller/pkg/metrics/ovnkube_controller_test.go +++ b/go-controller/pkg/metrics/recorders/duration_test.go @@ -1,4 +1,4 @@ -package metrics +package recorders import ( "fmt" @@ -12,19 +12,31 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakeclientgo "k8s.io/client-go/kubernetes/fake" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" + egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" + egressipfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/clientset/versioned/fake" + egressqosfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/clientset/versioned/fake" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/mocks" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -func setupOvn(nbData libovsdbtest.TestSetup) (client.Client, client.Client, *libovsdbtest.Context) { - nbClient, sbClient, cleanup, err := libovsdbtest.NewNBSBTestHarness(nbData) +func setHvCfg(nbClient client.Client, hvCfg int, hvCfgTimestamp time.Time) { + nbGlobal := nbdb.NBGlobal{} + nbGlobalResp, err := libovsdbops.GetNBGlobal(nbClient, &nbGlobal) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + nbGlobalResp.HvCfg = hvCfg + 
nbGlobalResp.HvCfgTimestamp = int(hvCfgTimestamp.UnixMilli()) + ops, err := nbClient.Where(nbGlobalResp).Update(nbGlobalResp, &nbGlobalResp.HvCfg, &nbGlobalResp.HvCfgTimestamp) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(ops).To(gomega.HaveLen(1)) + _, err = libovsdbops.TransactAndCheck(nbClient, ops) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - return sbClient, nbClient, cleanup } func getKubeClient(nodeCount int) *kube.Kube { @@ -40,23 +52,16 @@ func getKubeClient(nodeCount int) *kube.Kube { return &kube.Kube{KClient: kubeFakeClient} } -func setHvCfg(nbClient client.Client, hvCfg int, hvCfgTimestamp time.Time) { - nbGlobal := nbdb.NBGlobal{} - nbGlobalResp, err := libovsdbops.GetNBGlobal(nbClient, &nbGlobal) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - nbGlobalResp.HvCfg = hvCfg - nbGlobalResp.HvCfgTimestamp = int(hvCfgTimestamp.UnixMilli()) - ops, err := nbClient.Where(nbGlobalResp).Update(nbGlobalResp, &nbGlobalResp.HvCfg, &nbGlobalResp.HvCfgTimestamp) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(ops).To(gomega.HaveLen(1)) - _, err = libovsdbops.TransactAndCheck(nbClient, ops) +func setupOvn(nbData libovsdbtest.TestSetup) (client.Client, client.Client, *libovsdbtest.Context) { + nbClient, sbClient, cleanup, err := libovsdbtest.NewNBSBTestHarness(nbData) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return sbClient, nbClient, cleanup } var _ = ginkgo.Describe("Config Duration Operations", func() { var ( instance *ConfigDurationRecorder - k *kube.Kube + wf *factory.WatchFactory nbClient client.Client cleanup *libovsdbtest.Context stop chan struct{} @@ -69,7 +74,23 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { ginkgo.BeforeEach(func() { cdr = nil instance = GetConfigDurationRecorder() - k = getKubeClient(1) + k := getKubeClient(1) + egressFirewallFakeClient := &egressfirewallfake.Clientset{} + egressIPFakeClient := &egressipfake.Clientset{} + egressQoSFakeClient := 
&egressqosfake.Clientset{} + fakeClient := &util.OVNClientset{ + KubeClient: k.KClient, + EgressIPClient: egressIPFakeClient, + EgressFirewallClient: egressFirewallFakeClient, + EgressQoSClient: egressQoSFakeClient, + } + + var err error + wf, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = wf.Start() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + stop = make(chan struct{}) _, nbClient, cleanup = setupOvn(libovsdbtest.TestSetup{ NBData: []libovsdbtest.TestData{&nbdb.NBGlobal{UUID: "cd-op-uuid"}}}) @@ -78,11 +99,12 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { ginkgo.AfterEach(func() { cleanup.Cleanup() close(stop) + wf.Stop() }) ginkgo.Context("Runtime", func() { ginkgo.It("records correctly", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock startTimestamp, ok := instance.Start("pod", testNamespaceA, testPodNameA) @@ -104,7 +126,7 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { }) ginkgo.It("records correctly with OVN latency", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock startTimestamp, ok := instance.Start("pod", testNamespaceA, testPodNameA) @@ -134,7 +156,7 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { }) ginkgo.It("records multiple different objs including adding OVN latency", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock // recording 1 @@ -186,13 +208,13 @@ var _ = ginkgo.Describe("Config Duration Operations", func() { }) ginkgo.It("denies recording when no start called", 
func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) ops, _, _, _ := instance.AddOVN(nbClient, "pod", testNamespaceA, testPodNameA) gomega.Expect(ops).Should(gomega.BeEmpty()) }) ginkgo.It("allows multiple addOVN records for the same obj", func() { - instance.Run(nbClient, k, 0, time.Millisecond, stop) + instance.Run(nbClient, wf, 0, time.Millisecond, stop) histoMock := mocks.NewHistogramVecMock() metricNetworkProgramming = histoMock // recording 1 diff --git a/go-controller/pkg/nbdb/acl.go b/go-controller/pkg/nbdb/acl.go index 0c2840c178..5415af620b 100644 --- a/go-controller/pkg/nbdb/acl.go +++ b/go-controller/pkg/nbdb/acl.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ACLTable = "ACL" diff --git a/go-controller/pkg/nbdb/address_set.go b/go-controller/pkg/nbdb/address_set.go index e8a836e2d1..be37eaf40d 100644 --- a/go-controller/pkg/nbdb/address_set.go +++ b/go-controller/pkg/nbdb/address_set.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const AddressSetTable = "Address_Set" diff --git a/go-controller/pkg/nbdb/bfd.go b/go-controller/pkg/nbdb/bfd.go index 46646e81a7..4211ceae80 100644 --- a/go-controller/pkg/nbdb/bfd.go +++ b/go-controller/pkg/nbdb/bfd.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BFDTable = "BFD" diff --git a/go-controller/pkg/nbdb/chassis_template_var.go b/go-controller/pkg/nbdb/chassis_template_var.go index 602c3f5223..59c61d07de 100644 --- a/go-controller/pkg/nbdb/chassis_template_var.go +++ b/go-controller/pkg/nbdb/chassis_template_var.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisTemplateVarTable = "Chassis_Template_Var" diff --git 
a/go-controller/pkg/nbdb/connection.go b/go-controller/pkg/nbdb/connection.go index baf6da344b..da2aa4bca3 100644 --- a/go-controller/pkg/nbdb/connection.go +++ b/go-controller/pkg/nbdb/connection.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ConnectionTable = "Connection" diff --git a/go-controller/pkg/nbdb/copp.go b/go-controller/pkg/nbdb/copp.go index 1e146b657e..54bbc841f6 100644 --- a/go-controller/pkg/nbdb/copp.go +++ b/go-controller/pkg/nbdb/copp.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const CoppTable = "Copp" diff --git a/go-controller/pkg/nbdb/dhcp_options.go b/go-controller/pkg/nbdb/dhcp_options.go index fd68ebee2d..7b58c1fe35 100644 --- a/go-controller/pkg/nbdb/dhcp_options.go +++ b/go-controller/pkg/nbdb/dhcp_options.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPOptionsTable = "DHCP_Options" diff --git a/go-controller/pkg/nbdb/dhcp_relay.go b/go-controller/pkg/nbdb/dhcp_relay.go index f0e973ab78..5e10f2aff4 100644 --- a/go-controller/pkg/nbdb/dhcp_relay.go +++ b/go-controller/pkg/nbdb/dhcp_relay.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPRelayTable = "DHCP_Relay" diff --git a/go-controller/pkg/nbdb/dns.go b/go-controller/pkg/nbdb/dns.go index 285d5df280..a15b166a80 100644 --- a/go-controller/pkg/nbdb/dns.go +++ b/go-controller/pkg/nbdb/dns.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DNSTable = "DNS" diff --git a/go-controller/pkg/nbdb/forwarding_group.go b/go-controller/pkg/nbdb/forwarding_group.go index 1a0657559d..82078551d3 100644 --- a/go-controller/pkg/nbdb/forwarding_group.go +++ 
b/go-controller/pkg/nbdb/forwarding_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ForwardingGroupTable = "Forwarding_Group" diff --git a/go-controller/pkg/nbdb/gateway_chassis.go b/go-controller/pkg/nbdb/gateway_chassis.go index 15935847b8..de6925f4c3 100644 --- a/go-controller/pkg/nbdb/gateway_chassis.go +++ b/go-controller/pkg/nbdb/gateway_chassis.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const GatewayChassisTable = "Gateway_Chassis" diff --git a/go-controller/pkg/nbdb/ha_chassis.go b/go-controller/pkg/nbdb/ha_chassis.go index dc09d1ec9d..8c171ddd09 100644 --- a/go-controller/pkg/nbdb/ha_chassis.go +++ b/go-controller/pkg/nbdb/ha_chassis.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisTable = "HA_Chassis" diff --git a/go-controller/pkg/nbdb/ha_chassis_group.go b/go-controller/pkg/nbdb/ha_chassis_group.go index bdda95aaf7..6d304fd2e9 100644 --- a/go-controller/pkg/nbdb/ha_chassis_group.go +++ b/go-controller/pkg/nbdb/ha_chassis_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisGroupTable = "HA_Chassis_Group" diff --git a/go-controller/pkg/nbdb/load_balancer.go b/go-controller/pkg/nbdb/load_balancer.go index 03bcd76011..553bc48dda 100644 --- a/go-controller/pkg/nbdb/load_balancer.go +++ b/go-controller/pkg/nbdb/load_balancer.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerTable = "Load_Balancer" @@ -13,15 +13,17 @@ type ( ) var ( - LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" - LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" - LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" - 
LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src" - LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst" - LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields = "ip_src" - LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst" - LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src" - LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst" + LoadBalancerProtocolTCP LoadBalancerProtocol = "tcp" + LoadBalancerProtocolUDP LoadBalancerProtocol = "udp" + LoadBalancerProtocolSCTP LoadBalancerProtocol = "sctp" + LoadBalancerSelectionFieldsEthSrc LoadBalancerSelectionFields = "eth_src" + LoadBalancerSelectionFieldsEthDst LoadBalancerSelectionFields = "eth_dst" + LoadBalancerSelectionFieldsIPSrc LoadBalancerSelectionFields = "ip_src" + LoadBalancerSelectionFieldsIPDst LoadBalancerSelectionFields = "ip_dst" + LoadBalancerSelectionFieldsIpv6Src LoadBalancerSelectionFields = "ipv6_src" + LoadBalancerSelectionFieldsIpv6Dst LoadBalancerSelectionFields = "ipv6_dst" + LoadBalancerSelectionFieldsTpSrc LoadBalancerSelectionFields = "tp_src" + LoadBalancerSelectionFieldsTpDst LoadBalancerSelectionFields = "tp_dst" ) // LoadBalancer defines an object in Load_Balancer table diff --git a/go-controller/pkg/nbdb/load_balancer_group.go b/go-controller/pkg/nbdb/load_balancer_group.go index 7759249674..8d39f095ab 100644 --- a/go-controller/pkg/nbdb/load_balancer_group.go +++ b/go-controller/pkg/nbdb/load_balancer_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerGroupTable = "Load_Balancer_Group" diff --git a/go-controller/pkg/nbdb/load_balancer_health_check.go b/go-controller/pkg/nbdb/load_balancer_health_check.go index c8163fa007..8fc7020364 100644 --- a/go-controller/pkg/nbdb/load_balancer_health_check.go +++ b/go-controller/pkg/nbdb/load_balancer_health_check.go @@ -3,7 +3,7 @@ package 
nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerHealthCheckTable = "Load_Balancer_Health_Check" diff --git a/go-controller/pkg/nbdb/logical_router.go b/go-controller/pkg/nbdb/logical_router.go index 81c5efaf9d..f303af80fa 100644 --- a/go-controller/pkg/nbdb/logical_router.go +++ b/go-controller/pkg/nbdb/logical_router.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterTable = "Logical_Router" diff --git a/go-controller/pkg/nbdb/logical_router_policy.go b/go-controller/pkg/nbdb/logical_router_policy.go index 7272dbb8ad..377ef213d0 100644 --- a/go-controller/pkg/nbdb/logical_router_policy.go +++ b/go-controller/pkg/nbdb/logical_router_policy.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterPolicyTable = "Logical_Router_Policy" @@ -15,6 +15,7 @@ var ( LogicalRouterPolicyActionAllow LogicalRouterPolicyAction = "allow" LogicalRouterPolicyActionDrop LogicalRouterPolicyAction = "drop" LogicalRouterPolicyActionReroute LogicalRouterPolicyAction = "reroute" + LogicalRouterPolicyActionJump LogicalRouterPolicyAction = "jump" ) // LogicalRouterPolicy defines an object in Logical_Router_Policy table @@ -22,7 +23,9 @@ type LogicalRouterPolicy struct { UUID string `ovsdb:"_uuid"` Action LogicalRouterPolicyAction `ovsdb:"action"` BFDSessions []string `ovsdb:"bfd_sessions"` + Chain *string `ovsdb:"chain"` ExternalIDs map[string]string `ovsdb:"external_ids"` + JumpChain *string `ovsdb:"jump_chain"` Match string `ovsdb:"match"` Nexthop *string `ovsdb:"nexthop"` Nexthops []string `ovsdb:"nexthops"` @@ -66,6 +69,28 @@ func equalLogicalRouterPolicyBFDSessions(a, b []string) bool { return true } +func (a *LogicalRouterPolicy) GetChain() *string { + return a.Chain +} + +func copyLogicalRouterPolicyChain(a *string) *string { + if a 
== nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPolicyChain(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalRouterPolicy) GetExternalIDs() map[string]string { return a.ExternalIDs } @@ -96,6 +121,28 @@ func equalLogicalRouterPolicyExternalIDs(a, b map[string]string) bool { return true } +func (a *LogicalRouterPolicy) GetJumpChain() *string { + return a.JumpChain +} + +func copyLogicalRouterPolicyJumpChain(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalRouterPolicyJumpChain(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalRouterPolicy) GetMatch() string { return a.Match } @@ -187,7 +234,9 @@ func (a *LogicalRouterPolicy) GetPriority() int { func (a *LogicalRouterPolicy) DeepCopyInto(b *LogicalRouterPolicy) { *b = *a b.BFDSessions = copyLogicalRouterPolicyBFDSessions(a.BFDSessions) + b.Chain = copyLogicalRouterPolicyChain(a.Chain) b.ExternalIDs = copyLogicalRouterPolicyExternalIDs(a.ExternalIDs) + b.JumpChain = copyLogicalRouterPolicyJumpChain(a.JumpChain) b.Nexthop = copyLogicalRouterPolicyNexthop(a.Nexthop) b.Nexthops = copyLogicalRouterPolicyNexthops(a.Nexthops) b.Options = copyLogicalRouterPolicyOptions(a.Options) @@ -212,7 +261,9 @@ func (a *LogicalRouterPolicy) Equals(b *LogicalRouterPolicy) bool { return a.UUID == b.UUID && a.Action == b.Action && equalLogicalRouterPolicyBFDSessions(a.BFDSessions, b.BFDSessions) && + equalLogicalRouterPolicyChain(a.Chain, b.Chain) && equalLogicalRouterPolicyExternalIDs(a.ExternalIDs, b.ExternalIDs) && + equalLogicalRouterPolicyJumpChain(a.JumpChain, b.JumpChain) && a.Match == b.Match && equalLogicalRouterPolicyNexthop(a.Nexthop, b.Nexthop) && equalLogicalRouterPolicyNexthops(a.Nexthops, b.Nexthops) && diff --git a/go-controller/pkg/nbdb/logical_router_port.go 
b/go-controller/pkg/nbdb/logical_router_port.go index d39fe0db42..1d220b82d1 100644 --- a/go-controller/pkg/nbdb/logical_router_port.go +++ b/go-controller/pkg/nbdb/logical_router_port.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterPortTable = "Logical_Router_Port" diff --git a/go-controller/pkg/nbdb/logical_router_static_route.go b/go-controller/pkg/nbdb/logical_router_static_route.go index ce966e5707..ceccb8ac78 100644 --- a/go-controller/pkg/nbdb/logical_router_static_route.go +++ b/go-controller/pkg/nbdb/logical_router_static_route.go @@ -3,30 +3,41 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalRouterStaticRouteTable = "Logical_Router_Static_Route" type ( - LogicalRouterStaticRoutePolicy = string + LogicalRouterStaticRoutePolicy = string + LogicalRouterStaticRouteSelectionFields = string ) var ( - LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip" - LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip" + LogicalRouterStaticRoutePolicySrcIP LogicalRouterStaticRoutePolicy = "src-ip" + LogicalRouterStaticRoutePolicyDstIP LogicalRouterStaticRoutePolicy = "dst-ip" + LogicalRouterStaticRouteSelectionFieldsEthSrc LogicalRouterStaticRouteSelectionFields = "eth_src" + LogicalRouterStaticRouteSelectionFieldsEthDst LogicalRouterStaticRouteSelectionFields = "eth_dst" + LogicalRouterStaticRouteSelectionFieldsIPProto LogicalRouterStaticRouteSelectionFields = "ip_proto" + LogicalRouterStaticRouteSelectionFieldsIPSrc LogicalRouterStaticRouteSelectionFields = "ip_src" + LogicalRouterStaticRouteSelectionFieldsIPDst LogicalRouterStaticRouteSelectionFields = "ip_dst" + LogicalRouterStaticRouteSelectionFieldsIpv6Src LogicalRouterStaticRouteSelectionFields = "ipv6_src" + LogicalRouterStaticRouteSelectionFieldsIpv6Dst LogicalRouterStaticRouteSelectionFields = 
"ipv6_dst" + LogicalRouterStaticRouteSelectionFieldsTpSrc LogicalRouterStaticRouteSelectionFields = "tp_src" + LogicalRouterStaticRouteSelectionFieldsTpDst LogicalRouterStaticRouteSelectionFields = "tp_dst" ) // LogicalRouterStaticRoute defines an object in Logical_Router_Static_Route table type LogicalRouterStaticRoute struct { - UUID string `ovsdb:"_uuid"` - BFD *string `ovsdb:"bfd"` - ExternalIDs map[string]string `ovsdb:"external_ids"` - IPPrefix string `ovsdb:"ip_prefix"` - Nexthop string `ovsdb:"nexthop"` - Options map[string]string `ovsdb:"options"` - OutputPort *string `ovsdb:"output_port"` - Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"` - RouteTable string `ovsdb:"route_table"` + UUID string `ovsdb:"_uuid"` + BFD *string `ovsdb:"bfd"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + Nexthop string `ovsdb:"nexthop"` + Options map[string]string `ovsdb:"options"` + OutputPort *string `ovsdb:"output_port"` + Policy *LogicalRouterStaticRoutePolicy `ovsdb:"policy"` + RouteTable string `ovsdb:"route_table"` + SelectionFields []LogicalRouterStaticRouteSelectionFields `ovsdb:"selection_fields"` } func (a *LogicalRouterStaticRoute) GetUUID() string { @@ -171,6 +182,34 @@ func (a *LogicalRouterStaticRoute) GetRouteTable() string { return a.RouteTable } +func (a *LogicalRouterStaticRoute) GetSelectionFields() []LogicalRouterStaticRouteSelectionFields { + return a.SelectionFields +} + +func copyLogicalRouterStaticRouteSelectionFields(a []LogicalRouterStaticRouteSelectionFields) []LogicalRouterStaticRouteSelectionFields { + if a == nil { + return nil + } + b := make([]LogicalRouterStaticRouteSelectionFields, len(a)) + copy(b, a) + return b +} + +func equalLogicalRouterStaticRouteSelectionFields(a, b []LogicalRouterStaticRouteSelectionFields) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for i, v := range a { + if b[i] != v { + return false + } + } + return 
true +} + func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) { *b = *a b.BFD = copyLogicalRouterStaticRouteBFD(a.BFD) @@ -178,6 +217,7 @@ func (a *LogicalRouterStaticRoute) DeepCopyInto(b *LogicalRouterStaticRoute) { b.Options = copyLogicalRouterStaticRouteOptions(a.Options) b.OutputPort = copyLogicalRouterStaticRouteOutputPort(a.OutputPort) b.Policy = copyLogicalRouterStaticRoutePolicy(a.Policy) + b.SelectionFields = copyLogicalRouterStaticRouteSelectionFields(a.SelectionFields) } func (a *LogicalRouterStaticRoute) DeepCopy() *LogicalRouterStaticRoute { @@ -204,7 +244,8 @@ func (a *LogicalRouterStaticRoute) Equals(b *LogicalRouterStaticRoute) bool { equalLogicalRouterStaticRouteOptions(a.Options, b.Options) && equalLogicalRouterStaticRouteOutputPort(a.OutputPort, b.OutputPort) && equalLogicalRouterStaticRoutePolicy(a.Policy, b.Policy) && - a.RouteTable == b.RouteTable + a.RouteTable == b.RouteTable && + equalLogicalRouterStaticRouteSelectionFields(a.SelectionFields, b.SelectionFields) } func (a *LogicalRouterStaticRoute) EqualsModel(b model.Model) bool { diff --git a/go-controller/pkg/nbdb/logical_switch.go b/go-controller/pkg/nbdb/logical_switch.go index 50b8214ad3..8a342dd315 100644 --- a/go-controller/pkg/nbdb/logical_switch.go +++ b/go-controller/pkg/nbdb/logical_switch.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalSwitchTable = "Logical_Switch" diff --git a/go-controller/pkg/nbdb/logical_switch_port.go b/go-controller/pkg/nbdb/logical_switch_port.go index c048f76541..87994fdc72 100644 --- a/go-controller/pkg/nbdb/logical_switch_port.go +++ b/go-controller/pkg/nbdb/logical_switch_port.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalSwitchPortTable = "Logical_Switch_Port" @@ -21,6 +21,7 @@ type LogicalSwitchPort struct { Name string `ovsdb:"name"` 
Options map[string]string `ovsdb:"options"` ParentName *string `ovsdb:"parent_name"` + Peer *string `ovsdb:"peer"` PortSecurity []string `ovsdb:"port_security"` Tag *int `ovsdb:"tag"` TagRequest *int `ovsdb:"tag_request"` @@ -284,6 +285,28 @@ func equalLogicalSwitchPortParentName(a, b *string) bool { return *a == *b } +func (a *LogicalSwitchPort) GetPeer() *string { + return a.Peer +} + +func copyLogicalSwitchPortPeer(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalLogicalSwitchPortPeer(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + func (a *LogicalSwitchPort) GetPortSecurity() []string { return a.PortSecurity } @@ -394,6 +417,7 @@ func (a *LogicalSwitchPort) DeepCopyInto(b *LogicalSwitchPort) { b.MirrorRules = copyLogicalSwitchPortMirrorRules(a.MirrorRules) b.Options = copyLogicalSwitchPortOptions(a.Options) b.ParentName = copyLogicalSwitchPortParentName(a.ParentName) + b.Peer = copyLogicalSwitchPortPeer(a.Peer) b.PortSecurity = copyLogicalSwitchPortPortSecurity(a.PortSecurity) b.Tag = copyLogicalSwitchPortTag(a.Tag) b.TagRequest = copyLogicalSwitchPortTagRequest(a.TagRequest) @@ -428,6 +452,7 @@ func (a *LogicalSwitchPort) Equals(b *LogicalSwitchPort) bool { a.Name == b.Name && equalLogicalSwitchPortOptions(a.Options, b.Options) && equalLogicalSwitchPortParentName(a.ParentName, b.ParentName) && + equalLogicalSwitchPortPeer(a.Peer, b.Peer) && equalLogicalSwitchPortPortSecurity(a.PortSecurity, b.PortSecurity) && equalLogicalSwitchPortTag(a.Tag, b.Tag) && equalLogicalSwitchPortTagRequest(a.TagRequest, b.TagRequest) && diff --git a/go-controller/pkg/nbdb/meter.go b/go-controller/pkg/nbdb/meter.go index 09b7e9e6a4..e3a4a713da 100644 --- a/go-controller/pkg/nbdb/meter.go +++ b/go-controller/pkg/nbdb/meter.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterTable 
= "Meter" diff --git a/go-controller/pkg/nbdb/meter_band.go b/go-controller/pkg/nbdb/meter_band.go index 4ef0d901ac..1e1e7ad421 100644 --- a/go-controller/pkg/nbdb/meter_band.go +++ b/go-controller/pkg/nbdb/meter_band.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterBandTable = "Meter_Band" diff --git a/go-controller/pkg/nbdb/mirror.go b/go-controller/pkg/nbdb/mirror.go index 57e3b01f6d..352cc238af 100644 --- a/go-controller/pkg/nbdb/mirror.go +++ b/go-controller/pkg/nbdb/mirror.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MirrorTable = "Mirror" diff --git a/go-controller/pkg/nbdb/model.go b/go-controller/pkg/nbdb/model.go index daabac4530..07ca7e0e97 100644 --- a/go-controller/pkg/nbdb/model.go +++ b/go-controller/pkg/nbdb/model.go @@ -6,8 +6,8 @@ package nbdb import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb @@ -52,7 +52,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { var schema = `{ "name": "OVN_Northbound", - "version": "7.6.0", + "version": "7.11.0", "tables": { "ACL": { "columns": { @@ -819,6 +819,8 @@ var schema = `{ "eth_dst", "ip_src", "ip_dst", + "ipv6_src", + "ipv6_dst", "tp_src", "tp_dst" ] @@ -1026,7 +1028,8 @@ var schema = `{ [ "allow", "drop", - "reroute" + "reroute", + "jump" ] ] } @@ -1043,6 +1046,15 @@ var schema = `{ "max": "unlimited" } }, + "chain": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "external_ids": { "type": { "key": { @@ -1055,6 +1067,15 @@ var schema = `{ "max": "unlimited" } }, + "jump_chain": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "match": { "type": 
"string" }, @@ -1187,7 +1208,7 @@ var schema = `{ "key": { "type": "string" }, - "min": 1, + "min": 0, "max": "unlimited" } }, @@ -1301,6 +1322,29 @@ var schema = `{ }, "route_table": { "type": "string" + }, + "selection_fields": { + "type": { + "key": { + "type": "string", + "enum": [ + "set", + [ + "eth_src", + "eth_dst", + "ip_proto", + "ip_src", + "ip_dst", + "ipv6_src", + "ipv6_dst", + "tp_src", + "tp_dst" + ] + ] + }, + "min": 0, + "max": "unlimited" + } } } }, @@ -1532,6 +1576,15 @@ var schema = `{ "max": 1 } }, + "peer": { + "type": { + "key": { + "type": "string" + }, + "min": 0, + "max": 1 + } + }, "port_security": { "type": { "key": { @@ -2092,6 +2145,9 @@ var schema = `{ "ssl_ciphers": { "type": "string" }, + "ssl_ciphersuites": { + "type": "string" + }, "ssl_protocols": { "type": "string" } diff --git a/go-controller/pkg/nbdb/nat.go b/go-controller/pkg/nbdb/nat.go index 4bd1b7ed49..b10bbd25b3 100644 --- a/go-controller/pkg/nbdb/nat.go +++ b/go-controller/pkg/nbdb/nat.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const NATTable = "NAT" diff --git a/go-controller/pkg/nbdb/nb_global.go b/go-controller/pkg/nbdb/nb_global.go index bae9e20f20..3779d259fe 100644 --- a/go-controller/pkg/nbdb/nb_global.go +++ b/go-controller/pkg/nbdb/nb_global.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const NBGlobalTable = "NB_Global" diff --git a/go-controller/pkg/nbdb/port_group.go b/go-controller/pkg/nbdb/port_group.go index bf4fa809bc..525f84d90e 100644 --- a/go-controller/pkg/nbdb/port_group.go +++ b/go-controller/pkg/nbdb/port_group.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortGroupTable = "Port_Group" diff --git a/go-controller/pkg/nbdb/qos.go b/go-controller/pkg/nbdb/qos.go index d25322b4b2..3303f61c4d 100644 --- 
a/go-controller/pkg/nbdb/qos.go +++ b/go-controller/pkg/nbdb/qos.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const QoSTable = "QoS" diff --git a/go-controller/pkg/nbdb/sample.go b/go-controller/pkg/nbdb/sample.go index 639393a1e6..d53ef23825 100644 --- a/go-controller/pkg/nbdb/sample.go +++ b/go-controller/pkg/nbdb/sample.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SampleTable = "Sample" diff --git a/go-controller/pkg/nbdb/sample_collector.go b/go-controller/pkg/nbdb/sample_collector.go index 50f0659040..487465ee0f 100644 --- a/go-controller/pkg/nbdb/sample_collector.go +++ b/go-controller/pkg/nbdb/sample_collector.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SampleCollectorTable = "Sample_Collector" diff --git a/go-controller/pkg/nbdb/sampling_app.go b/go-controller/pkg/nbdb/sampling_app.go index a152b4237d..cd7458da83 100644 --- a/go-controller/pkg/nbdb/sampling_app.go +++ b/go-controller/pkg/nbdb/sampling_app.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SamplingAppTable = "Sampling_App" diff --git a/go-controller/pkg/nbdb/ssl.go b/go-controller/pkg/nbdb/ssl.go index ddaba5d322..0f01efc978 100644 --- a/go-controller/pkg/nbdb/ssl.go +++ b/go-controller/pkg/nbdb/ssl.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SSLTable = "SSL" @@ -16,6 +16,7 @@ type SSL struct { ExternalIDs map[string]string `ovsdb:"external_ids"` PrivateKey string `ovsdb:"private_key"` SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLCiphersuites string `ovsdb:"ssl_ciphersuites"` SSLProtocols string `ovsdb:"ssl_protocols"` } @@ -73,6 +74,10 @@ func (a *SSL) GetSSLCiphers() 
string { return a.SSLCiphers } +func (a *SSL) GetSSLCiphersuites() string { + return a.SSLCiphersuites +} + func (a *SSL) GetSSLProtocols() string { return a.SSLProtocols } @@ -105,6 +110,7 @@ func (a *SSL) Equals(b *SSL) bool { equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && a.PrivateKey == b.PrivateKey && a.SSLCiphers == b.SSLCiphers && + a.SSLCiphersuites == b.SSLCiphersuites && a.SSLProtocols == b.SSLProtocols } diff --git a/go-controller/pkg/nbdb/static_mac_binding.go b/go-controller/pkg/nbdb/static_mac_binding.go index 15207e6484..c3397e3e70 100644 --- a/go-controller/pkg/nbdb/static_mac_binding.go +++ b/go-controller/pkg/nbdb/static_mac_binding.go @@ -3,7 +3,7 @@ package nbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const StaticMACBindingTable = "Static_MAC_Binding" diff --git a/go-controller/pkg/networkmanager/nad_controller.go b/go-controller/pkg/networkmanager/nad_controller.go index b0c6a3198a..a212566ce1 100644 --- a/go-controller/pkg/networkmanager/nad_controller.go +++ b/go-controller/pkg/networkmanager/nad_controller.go @@ -274,6 +274,11 @@ func (c *nadController) syncNAD(key string, nad *nettypes.NetworkAttachmentDefin if nad != nil { nadNetwork, err = util.ParseNADInfo(nad) if err != nil { + // in case the type for the NAD is not ovn-k we should not record the error event + if err.Error() == util.ErrorAttachDefNotOvnManaged.Error() { + return nil + } + if c.recorder != nil { c.recorder.Eventf(&corev1.ObjectReference{Kind: nad.Kind, Namespace: nad.Namespace, Name: nad.Name}, corev1.EventTypeWarning, "InvalidConfig", "Failed to parse network config: %v", err.Error()) diff --git a/go-controller/pkg/networkmanager/nad_controller_test.go b/go-controller/pkg/networkmanager/nad_controller_test.go index c8a59b30b4..1ce5ad9168 100644 --- a/go-controller/pkg/networkmanager/nad_controller_test.go +++ b/go-controller/pkg/networkmanager/nad_controller_test.go @@ -469,6 +469,21 @@ func 
TestNADController(t *testing.T) { }, }, }, + { + name: "non ovn-k NAD added", + args: []args{ + { + nad: "test/nad_1", + network: &ovncnitypes.NetConf{ + NetConf: cnitypes.NetConf{ + Name: "test", + Type: "sriov", + }, + }, + wantErr: false, + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go new file mode 100644 index 0000000000..4031ff3cc8 --- /dev/null +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig.go @@ -0,0 +1,560 @@ +package bridgeconfig + +import ( + "fmt" + "net" + "strings" + "sync" + "sync/atomic" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +// BridgeUDNConfiguration holds the patchport and ctMark +// information for a given network +type BridgeUDNConfiguration struct { + PatchPort string + OfPortPatch string + MasqCTMark string + PktMark string + V4MasqIPs *udn.MasqueradeIPs + V6MasqIPs *udn.MasqueradeIPs + Subnets []config.CIDRNetworkEntry + NodeSubnets []*net.IPNet + Advertised atomic.Bool +} + +func (netConfig *BridgeUDNConfiguration) ShallowCopy() *BridgeUDNConfiguration { + copy := &BridgeUDNConfiguration{ + PatchPort: netConfig.PatchPort, + OfPortPatch: netConfig.OfPortPatch, + MasqCTMark: netConfig.MasqCTMark, + PktMark: netConfig.PktMark, + V4MasqIPs: netConfig.V4MasqIPs, + V6MasqIPs: netConfig.V6MasqIPs, + Subnets: netConfig.Subnets, + NodeSubnets: netConfig.NodeSubnets, + } + 
copy.Advertised.Store(netConfig.Advertised.Load()) + return copy +} + +func (netConfig *BridgeUDNConfiguration) IsDefaultNetwork() bool { + return netConfig.MasqCTMark == nodetypes.CtMarkOVN +} + +func (netConfig *BridgeUDNConfiguration) setOfPatchPort() error { + ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.PatchPort, "ofport") + if err != nil { + return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller and "+ + "while getting ofport. stderr: %v, error: %v", netConfig.PatchPort, stderr, err) + } + netConfig.OfPortPatch = ofportPatch + return nil +} + +type BridgeConfiguration struct { + mutex sync.Mutex + + // variables that are only set on creation and never changed + // don't require mutex lock to read + nodeName string + bridgeName string + uplinkName string + gwIface string + gwIfaceRep string + interfaceID string + + // variables that can be updated (read/write access should be done with mutex held) + ofPortHost string + ips []*net.IPNet + macAddress net.HardwareAddr + ofPortPhys string + netConfig map[string]*BridgeUDNConfiguration + eipMarkIPs *egressip.MarkIPsCache +} + +func NewBridgeConfiguration(intfName, nodeName, + physicalNetworkName string, + nodeSubnets, gwIPs []*net.IPNet, + advertised bool) (*BridgeConfiguration, error) { + var intfRep string + var err error + isGWAcclInterface := false + gwIntf := intfName + + defaultNetConfig := &BridgeUDNConfiguration{ + MasqCTMark: nodetypes.CtMarkOVN, + Subnets: config.Default.ClusterSubnets, + NodeSubnets: nodeSubnets, + } + res := BridgeConfiguration{ + nodeName: nodeName, + netConfig: map[string]*BridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + eipMarkIPs: egressip.NewMarkIPsCache(), + } + res.netConfig[types.DefaultNetworkName].Advertised.Store(advertised) + + if config.Gateway.GatewayAcceleratedInterface != "" { + // Try to get representor for the specified gateway device. 
+ // If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device + // for node IP, Host Ofport for Openflow etc. + // If failed - error for improper configuration option + intfRep, err = getRepresentor(config.Gateway.GatewayAcceleratedInterface) + if err != nil { + return nil, fmt.Errorf("gateway accelerated interface %s is not valid: %w", config.Gateway.GatewayAcceleratedInterface, err) + } + gwIntf = config.Gateway.GatewayAcceleratedInterface + isGWAcclInterface = true + klog.Infof("For gateway accelerated interface %s representor: %s", config.Gateway.GatewayAcceleratedInterface, intfRep) + } else { + intfRep, err = getRepresentor(gwIntf) + if err == nil { + isGWAcclInterface = true + } + } + + if isGWAcclInterface { + bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfRep) + if err != nil { + return nil, fmt.Errorf("failed to find bridge that has port %s: %w", intfRep, err) + } + link, err := util.GetNetLinkOps().LinkByName(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get netdevice link for %s: %w", gwIntf, err) + } + uplinkName, err := util.GetNicName(bridgeName) + if err != nil { + return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) + } + res.bridgeName = bridgeName + res.uplinkName = uplinkName + res.gwIfaceRep = intfRep + res.gwIface = gwIntf + res.macAddress = link.Attrs().HardwareAddr + } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { + // This is an OVS bridge's internal port + uplinkName, err := util.GetNicName(bridgeName) + if err != nil { + return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) + } + res.bridgeName = bridgeName + res.gwIface = bridgeName + res.uplinkName = uplinkName + gwIntf = bridgeName + } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { + // This is not a OVS bridge. 
We need to create a OVS bridge + // and add cluster.GatewayIntf as a port of that bridge. + bridgeName, err := util.NicToBridge(intfName) + if err != nil { + return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) + } + res.bridgeName = bridgeName + res.gwIface = bridgeName + res.uplinkName = intfName + gwIntf = bridgeName + } else { + // gateway interface is an OVS bridge + uplinkName, err := getIntfName(intfName) + if err != nil { + if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink { + klog.Infof("Could not find uplink for %s, setup gateway bridge with no uplink port, egress IP and egress GW will not work", intfName) + } else { + return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) + } + } else { + res.uplinkName = uplinkName + } + res.bridgeName = intfName + res.gwIface = intfName + } + // Now, we get IP addresses for the bridge + if len(gwIPs) > 0 { + // use gwIPs if provided + res.ips = gwIPs + } else { + // get IP addresses from OVS bridge. If IP does not exist, + // error out. 
+ res.ips, err = nodeutil.GetNetworkInterfaceIPAddresses(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) + } + } + + if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface + res.macAddress, err = util.GetOVSPortMACAddress(gwIntf) + if err != nil { + return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) + } + } + + res.interfaceID, err = bridgedGatewayNodeSetup(nodeName, res.bridgeName, physicalNetworkName) + if err != nil { + return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) + } + + // the name of the patch port created by ovn-controller is of the form + // patch--to-br-int + defaultNetConfig.PatchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.bridgeName, nodeName) + + // for DPU we use the host MAC address for the Gateway configuration + if config.OvnKubeNode.Mode == types.NodeModeDPU { + hostRep, err := util.GetDPUHostInterface(res.bridgeName) + if err != nil { + return nil, err + } + res.macAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) + if err != nil { + return nil, err + } + } + + // If gwIface is set, then accelerated GW interface is present and we use it. Else use external bridge instead. + if res.gwIface == "" { + res.gwIface = res.bridgeName + } + + return &res, nil +} + +func (b *BridgeConfiguration) GetGatewayIface() string { + return b.gwIface +} + +// UpdateInterfaceIPAddresses sets and returns the bridge's current ips +func (b *BridgeConfiguration) UpdateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { + b.mutex.Lock() + defer b.mutex.Unlock() + ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(b.GetGatewayIface()) + if err != nil { + return nil, err + } + + // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's + // host internal IP address instead of the DPU's external bridge IP address. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU { + nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) + if err != nil { + return nil, err + } + // For DPU mode, we only support IPv4 for now. + nodeAddrStr := nodeIfAddr.IPv4 + + nodeAddr, _, err := net.ParseCIDR(nodeAddrStr) + if err != nil { + return nil, fmt.Errorf("failed to parse node IP address. %v", nodeAddrStr) + } + ifAddrs, err = nodeutil.GetDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) + if err != nil { + return nil, err + } + } + + b.ips = ifAddrs + return ifAddrs, nil +} + +// GetPortConfigurations returns a slice of Network port configurations along with the +// uplinkName and physical port's ofport value +func (b *BridgeConfiguration) GetPortConfigurations() ([]*BridgeUDNConfiguration, string, string) { + b.mutex.Lock() + defer b.mutex.Unlock() + var netConfigs []*BridgeUDNConfiguration + for _, netConfig := range b.netConfig { + netConfigs = append(netConfigs, netConfig.ShallowCopy()) + } + return netConfigs, b.uplinkName, b.ofPortPhys +} + +// AddNetworkConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache +func (b *BridgeConfiguration) AddNetworkConfig( + nInfo util.NetInfo, + nodeSubnets []*net.IPNet, + masqCTMark, pktMark uint, + v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { + b.mutex.Lock() + defer b.mutex.Unlock() + + netName := nInfo.GetNetworkName() + patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) + + _, found := b.netConfig[netName] + if !found { + netConfig := &BridgeUDNConfiguration{ + PatchPort: patchPort, + MasqCTMark: fmt.Sprintf("0x%x", masqCTMark), + PktMark: fmt.Sprintf("0x%x", pktMark), + V4MasqIPs: v4MasqIPs, + V6MasqIPs: v6MasqIPs, + Subnets: nInfo.Subnets(), + NodeSubnets: nodeSubnets, + } + netConfig.Advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.nodeName)) + + b.netConfig[netName] = netConfig + } else { + klog.Warningf("Trying to update bridge config for network %s 
which already"+ + "exists in cache...networks are not mutable...ignoring update", nInfo.GetNetworkName()) + } + return nil +} + +// DelNetworkConfig deletes the provided netInfo from the bridge configuration cache +func (b *BridgeConfiguration) DelNetworkConfig(nInfo util.NetInfo) { + b.mutex.Lock() + defer b.mutex.Unlock() + + delete(b.netConfig, nInfo.GetNetworkName()) +} + +func (b *BridgeConfiguration) GetNetworkConfig(networkName string) *BridgeUDNConfiguration { + b.mutex.Lock() + defer b.mutex.Unlock() + return b.netConfig[networkName] +} + +// GetActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the +// provided netInfo. +// +// NOTE: if the network configuration can't be found or if the network is not patched by OVN +// yet this returns nil. +func (b *BridgeConfiguration) GetActiveNetworkBridgeConfigCopy(networkName string) *BridgeUDNConfiguration { + b.mutex.Lock() + defer b.mutex.Unlock() + + if netConfig, found := b.netConfig[networkName]; found && netConfig.OfPortPatch != "" { + return netConfig.ShallowCopy() + } + return nil +} + +// must be called with mutex held +func (b *BridgeConfiguration) patchedNetConfigs() []*BridgeUDNConfiguration { + result := make([]*BridgeUDNConfiguration, 0, len(b.netConfig)) + for _, netConfig := range b.netConfig { + if netConfig.OfPortPatch == "" { + continue + } + result = append(result, netConfig) + } + return result +} + +// IsGatewayReady checks if patch ports of every netConfig are present. 
+// used by gateway on newGateway readyFunc +func (b *BridgeConfiguration) IsGatewayReady() bool { + b.mutex.Lock() + defer b.mutex.Unlock() + for _, netConfig := range b.netConfig { + ready := gatewayReady(netConfig.PatchPort) + if !ready { + return false + } + } + return true +} + +func (b *BridgeConfiguration) SetOfPorts() error { + b.mutex.Lock() + defer b.mutex.Unlock() + // Get ofport of patchPort + for _, netConfig := range b.netConfig { + if err := netConfig.setOfPatchPort(); err != nil { + return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.PatchPort, err) + } + } + + if b.uplinkName != "" { + // Get ofport of physical interface + ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", b.uplinkName, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", + b.uplinkName, stderr, err) + } + b.ofPortPhys = ofportPhys + } + + // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU { + var stderr string + hostRep, err := util.GetDPUHostInterface(b.bridgeName) + if err != nil { + return err + } + + b.ofPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", + hostRep, stderr, err) + } + } else { + var err error + if b.gwIfaceRep != "" { + b.ofPortHost, _, err = util.RunOVSVsctl("get", "interface", b.gwIfaceRep, "ofport") + if err != nil { + return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", b.gwIfaceRep, err) + } + } else { + b.ofPortHost = nodetypes.OvsLocalPort + } + } + + return nil +} + +func (b *BridgeConfiguration) GetIPs() []*net.IPNet { + b.mutex.Lock() + defer b.mutex.Unlock() + return b.ips +} + +func (b *BridgeConfiguration) GetBridgeName() string { + return b.bridgeName +} + +func (b *BridgeConfiguration) GetUplinkName() string { + return b.uplinkName +} + +func (b *BridgeConfiguration) GetMAC() net.HardwareAddr { + b.mutex.Lock() + defer b.mutex.Unlock() + return b.macAddress +} + +func (b *BridgeConfiguration) SetMAC(macAddr net.HardwareAddr) { + b.mutex.Lock() + defer b.mutex.Unlock() + b.macAddress = macAddr +} + +func (b *BridgeConfiguration) SetNetworkOfPatchPort(netName string) error { + b.mutex.Lock() + defer b.mutex.Unlock() + + netConfig, found := b.netConfig[netName] + if !found { + return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, b.bridgeName) + } + return netConfig.setOfPatchPort() +} + +func (b *BridgeConfiguration) GetInterfaceID() string { + return b.interfaceID +} + +func (b *BridgeConfiguration) GetOfPortHost() string { + b.mutex.Lock() + defer b.mutex.Unlock() + return b.ofPortHost +} + +func (b *BridgeConfiguration) GetEIPMarkIPs() *egressip.MarkIPsCache { + b.mutex.Lock() + defer b.mutex.Unlock() + return b.eipMarkIPs +} + +func (b *BridgeConfiguration) SetEIPMarkIPs(eipMarkIPs 
*egressip.MarkIPsCache) { + b.mutex.Lock() + defer b.mutex.Unlock() + b.eipMarkIPs = eipMarkIPs +} + +func gatewayReady(patchPort string) bool { + // Get ofport of patchPort + ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") + if err != nil || len(ofport) == 0 { + return false + } + klog.Info("Gateway is ready") + return true +} + +func getIntfName(gatewayIntf string) (string, error) { + // The given (or autodetected) interface is an OVS bridge and this could be + // created by us using util.NicToBridge() or it was pre-created by the user. + + // Is intfName a port of gatewayIntf? + intfName, err := util.GetNicName(gatewayIntf) + if err != nil { + return "", err + } + _, stderr, err := util.RunOVSVsctl("get", "interface", intfName, "ofport") + if err != nil { + return "", fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", + intfName, stderr, err) + } + return intfName, nil +} + +// bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, +// and returns an ifaceID created from the bridge name and the node name +func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { + // IPv6 forwarding is enabled globally + if config.IPv4Mode { + // we use forward slash as path separator to allow dotted bridgeName e.g. foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", bridgeName)) + // systctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(bridgeName, ".", "/")) { + return "", fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", + bridgeName, stdout, stderr, err) + } + } + + // ovn-bridge-mappings maps a physical network name to a local ovs bridge + // that provides connectivity to that network. 
It is in the form of physnet1:br1,physnet2:br2. + // Note that there may be multiple ovs bridge mappings, be sure not to override + // the mappings for the other physical network + stdout, stderr, err := util.RunOVSVsctl("--if-exists", "get", "Open_vSwitch", ".", + "external_ids:ovn-bridge-mappings") + if err != nil { + return "", fmt.Errorf("failed to get ovn-bridge-mappings stderr:%s (%v)", stderr, err) + } + // skip the existing mapping setting for the specified physicalNetworkName + mapString := "" + bridgeMappings := strings.Split(stdout, ",") + for _, bridgeMapping := range bridgeMappings { + m := strings.Split(bridgeMapping, ":") + if network := m[0]; network != physicalNetworkName { + if len(mapString) != 0 { + mapString += "," + } + mapString += bridgeMapping + } + } + if len(mapString) != 0 { + mapString += "," + } + mapString += physicalNetworkName + ":" + bridgeName + + _, stderr, err = util.RunOVSVsctl("set", "Open_vSwitch", ".", + fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", mapString)) + if err != nil { + return "", fmt.Errorf("failed to set ovn-bridge-mappings for ovs bridge %s"+ + ", stderr:%s (%v)", bridgeName, stderr, err) + } + + ifaceID := bridgeName + "_" + nodeName + return ifaceID, nil +} + +func getRepresentor(intfName string) (string, error) { + deviceID, err := util.GetDeviceIDFromNetdevice(intfName) + if err != nil { + return "", err + } + + return util.GetFunctionRepresentorName(deviceID) +} diff --git a/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go new file mode 100644 index 0000000000..8395baf06d --- /dev/null +++ b/go-controller/pkg/node/bridgeconfig/bridgeconfig_testutil.go @@ -0,0 +1,139 @@ +package bridgeconfig + +import ( + "fmt" + "net" + "strings" + + net2 "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestDefaultBridgeConfig() *BridgeConfiguration { + defaultNetConfig := &BridgeUDNConfiguration{ + OfPortPatch: "patch-breth0_ov", + } + return &BridgeConfiguration{ + netConfig: map[string]*BridgeUDNConfiguration{ + types.DefaultNetworkName: defaultNetConfig, + }, + } +} + +func TestBridgeConfig(brName string) *BridgeConfiguration { + return &BridgeConfiguration{ + bridgeName: brName, + gwIface: brName, + } +} + +func (b *BridgeConfiguration) GetNetConfigLen() int { + b.mutex.Lock() + defer b.mutex.Unlock() + return len(b.netConfig) +} + +func CheckUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var mgmtMasqIP string + var protoPrefix string + if net2.IsIPv4CIDR(svcCIDR) { + mgmtMasqIP = netConfig.V4MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip" + } else { + mgmtMasqIP = netConfig.V6MasqIPs.ManagementPort.IP.String() + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, mgmtMasqIP)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + +func CheckAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *BridgeUDNConfiguration, netName string, svcCIDR *net.IPNet, expectedNFlows int) { + By(fmt.Sprintf("Checking advertised UDN %s service isolation flows for %s; expected %d flows", + netName, svcCIDR.String(), expectedNFlows)) + + var matchingIPFamilySubnet *net.IPNet + var protoPrefix string + var udnAdvertisedSubnets []*net.IPNet + var err error + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, 
clusterEntry.CIDR) + } + if net2.IsIPv4CIDR(svcCIDR) { + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + Expect(err).ToNot(HaveOccurred()) + protoPrefix = "ip" + } else { + matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) + Expect(err).ToNot(HaveOccurred()) + protoPrefix = "ip6" + } + + var nFlows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=drop", + protoPrefix, protoPrefix, matchingIPFamilySubnet)) { + nFlows++ + } + if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", + protoPrefix, protoPrefix, matchingIPFamilySubnet, protoPrefix, svcCIDR)) { + nFlows++ + } + } + + Expect(nFlows).To(Equal(expectedNFlows)) +} + +func CheckDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *BridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { + By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) + + var masqIP string + var masqSubnet string + var protoPrefix string + if net2.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ip6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + var nTable0DefaultFlows int + var nTable0UDNMasqFlows int + var nTable2Flows int + for _, flow := range flows { + if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", + ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, + masqIP)) { + nTable0DefaultFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", + 
ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { + nTable0UDNMasqFlows++ + } else if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", + bridgeMAC, defaultConfig.OfPortPatch)) { + nTable2Flows++ + } + } + + Expect(nTable0DefaultFlows).To(Equal(1)) + Expect(nTable0UDNMasqFlows).To(Equal(1)) + Expect(nTable2Flows).To(Equal(1)) +} diff --git a/go-controller/pkg/node/bridgeconfig/bridgeflows.go b/go-controller/pkg/node/bridgeconfig/bridgeflows.go new file mode 100644 index 0000000000..200c1540ec --- /dev/null +++ b/go-controller/pkg/node/bridgeconfig/bridgeflows.go @@ -0,0 +1,975 @@ +package bridgeconfig + +import ( + "fmt" + "net" + + "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +func (b *BridgeConfiguration) DefaultBridgeFlows(hostSubnets []*net.IPNet, extraIPs []net.IP) ([]string, error) { + b.mutex.Lock() + defer b.mutex.Unlock() + dftFlows, err := b.flowsForDefaultBridge(extraIPs) + if err != nil { + return nil, err + } + dftCommonFlows, err := b.commonFlows(hostSubnets) + if err != nil { + return nil, err + } + return append(dftFlows, dftCommonFlows...), nil +} + +func (b *BridgeConfiguration) ExternalBridgeFlows(hostSubnets []*net.IPNet) ([]string, error) { + b.mutex.Lock() + defer b.mutex.Unlock() + return b.commonFlows(hostSubnets) +} + +// must be called with bridge.mutex held +func (b *BridgeConfiguration) flowsForDefaultBridge(extraIPs []net.IP) ([]string, error) { + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure + // that dl_src is included in 
match criteria! + + ofPortPhys := b.ofPortPhys + bridgeMacAddress := b.macAddress.String() + ofPortHost := b.ofPortHost + bridgeIPs := b.ips + + var dftFlows []string + // 14 bytes of overhead for ethernet header (does not include VLAN) + maxPktLength := getMaxFrameLength() + + strip_vlan := "" + mod_vlan_id := "" + match_vlan := "" + if config.Gateway.VLANID != 0 { + strip_vlan = "strip_vlan," + match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) + mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) + } + + if config.IPv4Mode { + // table0, Geneve packets coming from external. Skip conntrack and go directly to host + // if dest mac is the shared mac send directly to host. + if ofPortPhys != "" { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, + ofPortHost)) + // perform NORMAL action otherwise. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ + "actions=NORMAL", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) + + // table0, Geneve packets coming from LOCAL/Host OFPort. 
Skip conntrack and go directly to external + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.EncapPort, ofPortPhys)) + } + physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) + } + for _, netConfig := range b.patchedNetConfigs() { + // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone, physicalIP.IP)) + } + + // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 + for _, ip := range extraIPs { + if ip.To4() == nil { + continue + } + // not needed for the physical IP + if ip.Equal(physicalIP.IP) { + continue + } + + // not needed for special masquerade IP + if ip.Equal(config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) { + continue + } + + for _, netConfig := range b.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ + "actions=ct(commit,zone=%d,table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone)) + } + } + + // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ + "actions=ct(zone=%d,nat,table=5)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + } + if 
config.IPv6Mode { + if ofPortPhys != "" { + // table0, Geneve packets coming from external. Skip conntrack and go directly to host + // if dest mac is the shared mac send directly to host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp6, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, + ofPortHost)) + // perform NORMAL action otherwise. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ + "actions=NORMAL", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) + + // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, nodetypes.OvsLocalPort, config.Default.EncapPort, ofPortPhys)) + } + + physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) + } + // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 + for _, netConfig := range b.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone, physicalIP.IP)) + } + + // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 + for _, ip := range extraIPs { + if ip.To4() != nil { + continue + } + // not needed for the physical IP + if ip.Equal(physicalIP.IP) { + continue + } + + // not needed for special masquerade IP + if 
ip.Equal(config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) { + continue + } + + for _, netConfig := range b.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ + "actions=ct(commit,zone=%d,table=4)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, ip.String(), physicalIP.IP, + config.Default.HostMasqConntrackZone)) + } + } + + // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ + "actions=ct(zone=%d,nat,table=5)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) + } + + var protoPrefix, masqIP, masqSubnet string + + // table 0, packets coming from Host -> Service + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + if utilnet.IsIPv4CIDR(svcCIDR) { + protoPrefix = "ip" + masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() + masqSubnet = config.Gateway.V4MasqueradeSubnet + } else { + protoPrefix = "ipv6" + masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() + masqSubnet = config.Gateway.V6MasqueradeSubnet + } + + // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,nat(src=%s),table=2)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) + + if util.IsNetworkSegmentationSupportEnabled() { + // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. + // For packets originating from UDN, commit without NATing, those + // have already been SNATed to the masq IP of the UDN. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + if util.IsRouteAdvertisementsEnabled() { + // If the UDN is advertised then instead of matching on the masqSubnet + // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 + // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 + // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) + for _, netConfig := range b.patchedNetConfigs() { + if netConfig.IsDefaultNetwork() { + continue + } + if netConfig.Advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(svcCIDR), udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine UDN subnet for the provided family isIPV6: %t, %v", utilnet.IsIPv6CIDR(svcCIDR), err) + continue + } + + // Use the filtered subnet for the flow compute instead of the masqueradeIP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ + "actions=ct(commit,zone=%d,table=2)", + nodetypes.DefaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, + matchingIPFamilySubnet.String(), protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) + } + } + } + } + + masqDst := masqIP + if util.IsNetworkSegmentationSupportEnabled() { + // In UDN match on the whole masquerade subnet to handle replies from UDN enabled 
services + masqDst = masqSubnet + } + for _, netConfig := range b.patchedNetConfigs() { + // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ + "actions=ct(zone=%d,nat,table=3)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR, + protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) + // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either + // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. + // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=115, in_port=%s, %s, %s_dst=%s,"+ + "actions=drop", nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, protoPrefix, protoPrefix, svcCIDR)) + } + } + + // table 0, add IP fragment reassembly flows, only needed in SGW mode with + // physical interface attached to bridge + if config.Gateway.Mode == config.GatewayModeShared && ofPortPhys != "" { + reassemblyFlows := generateIPFragmentReassemblyFlow(ofPortPhys) + dftFlows = append(dftFlows, reassemblyFlows...) 
+ } + if ofPortPhys != "" { + for _, netConfig := range b.patchedNetConfigs() { + var actions string + if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { + actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) + } else { + // packets larger than known acceptable MTU need to go to kernel for + // potential fragmentation + // introduced specifically for replies to egress traffic not routed + // through the host + actions = fmt.Sprintf("check_pkt_larger(%d)->reg0[0],resubmit(,11)", maxPktLength) + } + + if config.IPv4Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + + } + + if config.IPv6Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkOVN go to OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, actions)) + } + } + if config.IPv4Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + dftFlows = append(dftFlows, + 
fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + } + if config.IPv6Mode { + // table 1, established and related connections in zone 64000 with ct_mark CtMarkHost go to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ + "actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, nodetypes.CtMarkHost, strip_vlan, ofPortHost)) + + } + + // table 1, we check to see if this dest mac is the shared mac, if so send to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + } + + defaultNetConfig := b.netConfig[types.DefaultNetworkName] + + // table 2, dispatch from Host -> OVN + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, table=2, "+ + "actions=set_field:%s->eth_dst,%soutput:%s", nodetypes.DefaultOpenFlowCookie, + bridgeMacAddress, mod_vlan_id, defaultNetConfig.OfPortPatch)) + + // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have + // already been SNATed to the UDN's masquerade IP or have been marked with the UDN's packet mark. 
+ if config.IPv4Mode { + for _, netConfig := range b.patchedNetConfigs() { + if netConfig.IsDefaultNetwork() { + continue + } + if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) + continue + } + // In addition to the masqueradeIP based flows, we also need the podsubnet based flows for + // advertised networks since UDN pod to clusterIP is unSNATed and we need this traffic to be taken into + // the correct patch port of it's own network where it's a deadend if the clusterIP is not part of + // that UDN network and works if it is part of the UDN network. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, matchingIPFamilySubnet.String())) + } + // Drop traffic coming from the masquerade IP or the UDN subnet(for advertised UDNs) to ensure that + // isolation between networks is enforced. This handles the case where a pod on the UDN subnet is sending traffic to + // a service in another UDN. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, netConfig.V4MasqIPs.ManagementPort.IP.String())) + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=250, table=2, ip, pkt_mark=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, + bridgeMacAddress, netConfig.OfPortPatch)) + } + } + + if config.IPv6Mode { + for _, netConfig := range b.patchedNetConfigs() { + if netConfig.IsDefaultNetwork() { + continue + } + if util.IsRouteAdvertisementsEnabled() && netConfig.Advertised.Load() { + var udnAdvertisedSubnets []*net.IPNet + for _, clusterEntry := range netConfig.Subnets { + udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) + } + // Filter subnets based on the clusterIP service family + // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one + matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) + if err != nil { + klog.Infof("Unable to determine IPV6 UDN subnet for the provided family isIPV6: %v", err) + continue + } + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, matchingIPFamilySubnet.String())) + } + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ + "actions=drop", + nodetypes.DefaultOpenFlowCookie, netConfig.V6MasqIPs.ManagementPort.IP.String())) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=250, table=2, ip6, pkt_mark=%s, "+ + "actions=set_field:%s->eth_dst,output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.PktMark, + bridgeMacAddress, netConfig.OfPortPatch)) + } + } + + // table 3, dispatch from OVN -> Host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=3, %s "+ + 
"actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:%s->eth_dst,%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + + // table 4, hairpinned pkts that need to go from OVN -> Host + // We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host + if config.IPv4Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=4,ip,"+ + "actions=ct(commit,zone=%d,nat(src=%s),table=3)", + nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) + } + if config.IPv6Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=4,ipv6, "+ + "actions=ct(commit,zone=%d,nat(src=%s),table=3)", + nodetypes.DefaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) + } + // table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2 + if config.IPv4Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=5, ip, "+ + "actions=ct(commit,zone=%d,nat,table=2)", + nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) + } + if config.IPv6Mode { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, table=5, ipv6, "+ + "actions=ct(commit,zone=%d,nat,table=2)", + nodetypes.DefaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) + } + return dftFlows, nil +} + +// getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle +func getMaxFrameLength() int { + return config.Default.MTU + 14 +} + +// generateIPFragmentReassemblyFlow adds flows in table 0 that send packets to a +// specific conntrack zone for reassembly with the same priority as node port +// flows that match on L4 fields. After reassembly packets are reinjected to +// table 0 again. This requires a conntrack immplementation that reassembles +// fragments. 
This requirement is met for the kernel datapath with the netfilter +// module loaded. This requirement is not met for the userspace datapath. +func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { + flows := make([]string, 0, 2) + if config.IPv4Mode { + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", + nodetypes.DefaultOpenFlowCookie, + ofPortPhys, + config.Default.ReassemblyConntrackZone, + ), + ) + } + if config.IPv6Mode { + flows = append(flows, + fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", + nodetypes.DefaultOpenFlowCookie, + ofPortPhys, + config.Default.ReassemblyConntrackZone, + ), + ) + } + + return flows +} + +// must be called with bridge.mutex held +func (b *BridgeConfiguration) commonFlows(hostSubnets []*net.IPNet) ([]string, error) { + // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure + // that dl_src is included in match criteria! 
+ ofPortPhys := b.ofPortPhys + bridgeMacAddress := b.macAddress.String() + ofPortHost := b.ofPortHost + bridgeIPs := b.ips + + var dftFlows []string + + strip_vlan := "" + match_vlan := "" + mod_vlan_id := "" + if config.Gateway.VLANID != 0 { + strip_vlan = "strip_vlan," + match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) + mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) + } + + if ofPortPhys != "" { + // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports + actions := "" + for _, netConfig := range b.patchedNetConfigs() { + actions += "output:" + netConfig.OfPortPatch + "," + } + + actions += strip_vlan + "output:" + ofPortHost + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=0, %s dl_dst=%s, actions=%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, actions)) + } + + // table 0, check packets coming from OVN have the correct mac address. Low priority flows that are a catch all + // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). 
+ for _, netConfig := range b.patchedNetConfigs() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch)) + } + + if config.IPv4Mode { + physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) + } + if ofPortPhys != "" { + for _, netConfig := range b.patchedNetConfigs() { + // table0, packets coming from egressIP pods that have mark 1008 on them + // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR + // SNATs these into egressIP prior to reaching external bridge. + // egressService pods will also undergo this SNAT to nodeIP since these features are tied + // together at the OVN policy level on the distributed router. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + + // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to + // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
+ if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && + config.Gateway.Mode != config.GatewayModeDisabled && b.eipMarkIPs != nil { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + for mark, eip := range b.eipMarkIPs.GetIPv4() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) + } + } + } + + // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN + // so that reverse direction goes back to the pods. + if netConfig.IsDefaultNetwork() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, + netConfig.MasqCTMark, ofPortPhys)) + + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node + if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) 
+ } + } else { + // for UDN we additionally SNAT the packet from masquerade IP -> node IP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + } + } + + // table 0, packets coming from host. Commit connections with ct_mark CtMarkHost + // so that reverse direction goes back to the host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) + } + if config.Gateway.Mode == config.GatewayModeLocal { + for _, netConfig := range b.patchedNetConfigs() { + // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. + // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + // We send BFD traffic coming from OVN to outside directly using a higher priority flow + if ofPortPhys != "" { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) + } + } + } + + if ofPortPhys != "" { + // table 0, packets coming from external or other localnet ports. Send it through conntrack and + // resubmit to table 1 to know the state and mark of the connection. + // Note, there are higher priority rules that take care of traffic coming from LOCAL and OVN ports. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=50, ip, actions=ct(zone=%d, nat, table=1)", + nodetypes.DefaultOpenFlowCookie, config.Default.ConntrackZone)) + } + } + + if config.IPv6Mode { + physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) + if err != nil { + return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) + } + if ofPortPhys != "" { + for _, netConfig := range b.patchedNetConfigs() { + // table0, packets coming from egressIP pods that have mark 1008 on them + // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR + // SNATs these into egressIP prior to reaching external bridge. + // egressService pods will also undergo this SNAT to nodeIP since these features are tied + // together at the OVN policy level on the distributed router. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, nodetypes.OvnKubeNodeSNATMark, + config.Default.ConntrackZone, physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + + // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to + // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
+ if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && + config.Gateway.Mode != config.GatewayModeDisabled && b.eipMarkIPs != nil { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + for mark, eip := range b.eipMarkIPs.GetIPv6() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, mark, + config.Default.ConntrackZone, eip, netConfig.MasqCTMark, ofPortPhys)) + } + } + } + + // table 0, packets coming from pods headed externally. Commit connections with ct_mark CtMarkOVN + // so that reverse direction goes back to the pods. + if netConfig.IsDefaultNetwork() { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.MasqCTMark, ofPortPhys)) + + // Allow (a) OVN->host traffic on the same node + // (b) host->host traffic on the same node + if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { + dftFlows = append(dftFlows, hostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) 
+ } + } else { + // for UDN we additionally SNAT the packet from masquerade IP -> node IP + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ + "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, netConfig.V6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, + physicalIP.IP, netConfig.MasqCTMark, ofPortPhys)) + } + } + + // table 0, packets coming from host. Commit connections with ct_mark CtMarkHost + // so that reverse direction goes back to the host. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", + nodetypes.DefaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, nodetypes.CtMarkHost, mod_vlan_id, ofPortPhys)) + + } + if config.Gateway.Mode == config.GatewayModeLocal { + for _, netConfig := range b.patchedNetConfigs() { + // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. + // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
+ dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ + "actions=ct(table=4,zone=%d)", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) + if ofPortPhys != "" { + // We send BFD traffic coming from OVN to outside directly using a higher priority flow + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", + nodetypes.DefaultOpenFlowCookie, netConfig.OfPortPatch, bridgeMacAddress, ofPortPhys)) + } + } + } + if ofPortPhys != "" { + // table 0, packets coming from external. Send it through conntrack and + // resubmit to table 1 to know the state and mark of the connection. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ + "actions=ct(zone=%d, nat, table=1)", nodetypes.DefaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) + } + } + // Egress IP is often configured on a node different from the one hosting the affected pod. + // Due to the fact that ovn-controllers on different nodes apply the changes independently, + // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. 
+ // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) + defaultNetConfig := b.netConfig[types.DefaultNetworkName] + if config.OVNKubernetesFeature.EnableEgressIP { + for _, clusterEntry := range config.Default.ClusterSubnets { + cidr := clusterEntry.CIDR + ipv := getIPv(cidr) + // table 0, drop packets coming from pods headed externally that were not SNATed. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, ipv, ipv, cidr)) + } + for _, subnet := range defaultNetConfig.NodeSubnets { + ipv := getIPv(subnet) + if ofPortPhys != "" { + // table 0, commit connections from local pods. + // ICNIv2 requires that local pod traffic can leave the node without SNAT. + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s, "+ + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, bridgeMacAddress, ipv, ipv, subnet, + config.Default.ConntrackZone, nodetypes.CtMarkOVN, ofPortPhys)) + } + } + } + + if ofPortPhys != "" { + for _, netConfig := range b.patchedNetConfigs() { + isNetworkAdvertised := netConfig.Advertised.Load() + // disableSNATMultipleGWs only applies to default network + disableSNATMultipleGWs := netConfig.IsDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs + if !disableSNATMultipleGWs && !isNetworkAdvertised { + continue + } + output := netConfig.OfPortPatch + if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal { + // except if advertised through BGP, go to kernel + // TODO: MEG enabled pods should still go through the patch port + // but holding this until + // https://issues.redhat.com/browse/FDP-646 is fixed, for now we + // are assuming MEG & BGP are not used together + output = nodetypes.OvsLocalPort + } + for _, clusterEntry := range 
netConfig.Subnets { + cidr := clusterEntry.CIDR + ipv := getIPv(cidr) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=15, table=1, %s, %s_dst=%s, "+ + "actions=output:%s", + nodetypes.DefaultOpenFlowCookie, ipv, ipv, cidr, output)) + } + if output == netConfig.OfPortPatch { + // except node management traffic + for _, subnet := range netConfig.NodeSubnets { + mgmtIP := util.GetNodeManagementIfAddr(subnet) + ipv := getIPv(mgmtIP) + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+ + "actions=output:%s", + nodetypes.DefaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, nodetypes.OvsLocalPort), + ) + } + } + } + + // table 1, we check to see if this dest mac is the shared mac, if so send to host + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", + nodetypes.DefaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) + + if config.IPv6Mode { + // REMOVEME(trozet) when https://bugzilla.kernel.org/show_bug.cgi?id=11797 is resolved + // must flood icmpv6 Route Advertisement and Neighbor Advertisement traffic as it fails to create a CT entry + for _, icmpType := range []int{types.RouteAdvertisementICMPType, types.NeighborAdvertisementICMPType} { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=14, table=1,icmp6,icmpv6_type=%d actions=FLOOD", + nodetypes.DefaultOpenFlowCookie, icmpType)) + } + if ofPortPhys != "" { + // We send BFD traffic both on the host and in ovn + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", + nodetypes.DefaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) + } + } + + if config.IPv4Mode { + if ofPortPhys != "" { + // We send BFD traffic both on the host and in ovn + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, 
actions=output:%s,output:%s", + nodetypes.DefaultOpenFlowCookie, ofPortPhys, defaultNetConfig.OfPortPatch, ofPortHost)) + } + } + + // packets larger than known acceptable MTU need to go to kernel for + // potential fragmentation + // introduced specifically for replies to egress traffic not routed + // through the host + if config.Gateway.Mode == config.GatewayModeLocal && !config.Gateway.DisablePacketMTUCheck { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=10, table=11, reg0=0x1, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, ofPortHost)) + + // Send UDN destined traffic to right patch port + for _, netConfig := range b.patchedNetConfigs() { + if netConfig.MasqCTMark != nodetypes.CtMarkOVN { + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, netConfig.MasqCTMark, netConfig.OfPortPatch)) + } + } + + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=1, table=11, "+ + "actions=output:%s", nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch)) + } + + // table 1, all other connections do normal processing + dftFlows = append(dftFlows, + fmt.Sprintf("cookie=%s, priority=0, table=1, actions=output:NORMAL", nodetypes.DefaultOpenFlowCookie)) + } + + return dftFlows, nil +} + +func (b *BridgeConfiguration) PMTUDDropFlows(ipAddrs []string) []string { + b.mutex.Lock() + defer b.mutex.Unlock() + var flows []string + if config.Gateway.Mode != config.GatewayModeShared { + return nil + } + for _, addr := range ipAddrs { + for _, netConfig := range b.patchedNetConfigs() { + flows = append(flows, + nodeutil.GenerateICMPFragmentationFlow(addr, nodetypes.OutputPortDrop, netConfig.OfPortPatch, nodetypes.PmtudOpenFlowCookie, 700)) + } + } + + return flows +} + +func getIPv(ipnet *net.IPNet) string { + prefix := "ip" + if utilnet.IsIPv6CIDR(ipnet) { + prefix = "ipv6" + } + return prefix +} + +// hostNetworkNormalActionFlows returns the 
flows that allow IP{v4,v6} traffic: +// a. from pods in the OVN network to pods in a localnet network, on the same node +// b. from pods on the host to pods in a localnet network, on the same node +// when the localnet is mapped to breth0. +// The expected srcMAC is the MAC address of breth0 and the expected hostSubnets is the host subnets found on the node +// primary interface. +func hostNetworkNormalActionFlows(netConfig *BridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { + var flows []string + var ipFamily, ipFamilyDest string + + if isV6 { + ipFamily = "ipv6" + ipFamilyDest = "ipv6_dst" + } else { + ipFamily = "ip" + ipFamilyDest = "nw_dst" + } + + formatFlow := func(inPort, destIP, ctMark string) string { + // Matching IP traffic will be handled by the bridge instead of being output directly + // to the NIC by the existing flow at prio=100. + flowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(flowTemplate, + nodetypes.DefaultOpenFlowCookie, + inPort, + srcMAC, + ipFamily, + ipFamilyDest, + destIP, + config.Default.ConntrackZone, + ctMark) + } + + // Traffic path (a): OVN->localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(netConfig.OfPortPatch, hostSubnet.String(), netConfig.MasqCTMark)) + } + } + + // Traffic path (a): OVN->localnet for local gw mode + // Traffic path (b): host->localnet for both gw modes + for _, hostSubnet := range hostSubnets { + if utilnet.IsIPv6(hostSubnet.IP) != isV6 { + continue + } + flows = append(flows, formatFlow(nodetypes.OvsLocalPort, hostSubnet.String(), nodetypes.CtMarkHost)) + } + + if isV6 { + // IPv6 neighbor discovery uses ICMPv6 messages sent to a special destination (ff02::1:ff00:0/104) + // 
that is unrelated to the host subnets matched in the prio=102 flow above. + // Allow neighbor discovery by matching against ICMP type and ingress port. + formatICMPFlow := func(inPort, ctMark string, icmpType int) string { + icmpFlowTemplate := "cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, " + + "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL" + return fmt.Sprintf(icmpFlowTemplate, + nodetypes.DefaultOpenFlowCookie, + inPort, + srcMAC, + icmpType, + config.Default.ConntrackZone, + ctMark) + } + + for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { + // Traffic path (a) for ICMP: OVN-> localnet for shared gw mode + if config.Gateway.Mode == config.GatewayModeShared { + flows = append(flows, + formatICMPFlow(netConfig.OfPortPatch, netConfig.MasqCTMark, icmpType)) + } + + // Traffic path (a) for ICMP: OVN->localnet for local gw mode + // Traffic path (b) for ICMP: host->localnet for both gw modes + flows = append(flows, formatICMPFlow(nodetypes.OvsLocalPort, nodetypes.CtMarkHost, icmpType)) + } + } + return flows +} diff --git a/go-controller/pkg/node/default_node_network_controller.go b/go-controller/pkg/node/default_node_network_controller.go index 02a110b5d7..47ba8f6262 100644 --- a/go-controller/pkg/node/default_node_network_controller.go +++ b/go-controller/pkg/node/default_node_network_controller.go @@ -28,7 +28,7 @@ import ( utilnet "k8s.io/utils/net" "sigs.k8s.io/knftables" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" honode "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/controller" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni" @@ -45,6 +45,7 @@ import ( nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/ovspinning" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + nodetypes 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/healthcheck" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" @@ -150,7 +151,7 @@ func newDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, sto routeManager: routeManager, ovsClient: ovsClient, } - if util.IsNetworkSegmentationSupportEnabled() && !config.OVNKubernetesFeature.DisableUDNHostIsolation { + if util.IsNetworkSegmentationSupportEnabled() { c.udnHostIsolationManager = NewUDNHostIsolationManager(config.IPv4Mode, config.IPv6Mode, cnnci.watchFactory.PodCoreInformer(), cnnci.name, cnnci.recorder) } @@ -187,7 +188,7 @@ func NewDefaultNodeNetworkController(cnnci *CommonNodeNetworkControllerInfo, net nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() if err != nil { return nil, fmt.Errorf("failed to setup PMTUD nftables sets: %w", err) } @@ -830,7 +831,7 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { } } - if node, err = nc.Kube.GetNode(nc.name); err != nil { + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { return fmt.Errorf("error retrieving node %s: %v", nc.name, err) } @@ -895,7 +896,7 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { // First wait for the node logical switch to be created by the Master, timeout is 300s. err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 300*time.Second, true, func(_ context.Context) (bool, error) { - if node, err = nc.Kube.GetNode(nc.name); err != nil { + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { klog.Infof("Waiting to retrieve node %s: %v", nc.name, err) return false, nil } @@ -964,8 +965,12 @@ func (nc *DefaultNodeNetworkController) Init(ctx context.Context) error { // First part of gateway initialization. 
It will be completed by (nc *DefaultNodeNetworkController) Start() if config.OvnKubeNode.Mode != types.NodeModeDPUHost { + // IPv6 is not supported in DPU enabled nodes, error out if ovnkube is not set in IPv4 mode + if config.IPv6Mode && config.OvnKubeNode.Mode == types.NodeModeDPU { + return fmt.Errorf("IPv6 mode is not supported on a DPU enabled node") + } // Initialize gateway for OVS internal port or representor management port - gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, nc.mgmtPortController, nodeAddr) + gw, err := nc.initGatewayPreStart(subnets, nodeAnnotator, nc.mgmtPortController) if err != nil { return err } @@ -999,7 +1004,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { klog.Errorf("Setting klog \"loglevel\" to 5 failed, err: %v", err) } - if node, err = nc.Kube.GetNode(nc.name); err != nil { + if node, err = nc.watchFactory.GetNode(nc.name); err != nil { return fmt.Errorf("error retrieving node %s: %v", nc.name, err) } @@ -1028,7 +1033,37 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { // Complete gateway initialization if config.OvnKubeNode.Mode == types.NodeModeDPUHost { - err = nc.initGatewayDPUHost(nc.nodeAddress) + // Resolve gateway interface from PCI address when configured as "derive-from-mgmt-port" + // This performs the following steps: + // Get the management port network device name + // Retrieve the PCI address of the management port device + // Get the Physical Function (PF) PCI address from the Virtual Function (VF) PCI address + // Retrieve all network devices associated with the PF PCI address + // Select the first available network device as the gateway interface + if config.Gateway.Interface == types.DeriveFromMgmtPort { + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + if err != nil { + return err + } + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + if err != nil { + return err + } + pfPciAddr, err := 
util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + if err != nil { + return err + } + netdevs, err := util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + if err != nil { + return err + } + if len(netdevs) == 0 { + return fmt.Errorf("no netdevs found for pci address %s", pfPciAddr) + } + netdevName = netdevs[0] + config.Gateway.Interface = netdevName + } + err = nc.initGatewayDPUHost(nc.nodeAddress, nodeAnnotator) if err != nil { return err } @@ -1079,7 +1114,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { err = wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 300*time.Second, true, func(_ context.Context) (bool, error) { // we loop through all the nodes in the cluster and ensure ovnkube-controller has finished creating the LRSR required for pod2pod overlay communication if !syncNodes { - nodes, err := nc.Kube.GetNodes() + nodes, err := nc.watchFactory.GetNodes() if err != nil { err1 = fmt.Errorf("upgrade hack: error retrieving node %s: %v", nc.name, err) return false, nil @@ -1186,10 +1221,8 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { // is not needed. Future upgrade flows will need to take DPUs into account. 
if config.OvnKubeNode.Mode != types.NodeModeDPUHost { if config.OvnKubeNode.Mode == types.NodeModeFull { - bridgeName := nc.Gateway.GetGatewayIface() - // Configure route for svc towards shared gw bridge - // Have to have the route to bridge for multi-NIC mode, where the default gateway may go to a non-OVS interface - if err := configureSvcRouteViaBridge(nc.routeManager, bridgeName); err != nil { + // Configure route for svc towards shared gateway interface + if err := configureSvcRouteViaInterface(nc.routeManager, nc.Gateway.GetGatewayIface(), DummyNextHopIPs()); err != nil { return err } } @@ -1292,7 +1325,7 @@ func (nc *DefaultNodeNetworkController) Start(ctx context.Context) error { if config.OVNKubernetesFeature.EnableEgressService { wf := nc.watchFactory.(*factory.WatchFactory) - c, err := egressservice.NewController(nc.stopChan, ovnKubeNodeSNATMark, nc.name, + c, err := egressservice.NewController(nc.stopChan, nodetypes.OvnKubeNodeSNATMark, nc.name, wf.EgressServiceInformer(), wf.ServiceInformer(), wf.EndpointSliceInformer()) if err != nil { return err @@ -1482,25 +1515,34 @@ func (nc *DefaultNodeNetworkController) WatchNodes() error { func (nc *DefaultNodeNetworkController) addOrUpdateNode(node *corev1.Node) error { var nftElems []*knftables.Element var addrs []string - for _, address := range node.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue - } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue - } + // Use GetNodeAddresses to get all node IPs (including current node for openflow) + ipsv4, ipsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, node) + if err != nil { + return fmt.Errorf("failed to get node addresses for node %q: %w", node.Name, err) + } + + // Process IPv4 addresses + for _, nodeIP := range ipsv4 { addrs = append(addrs, nodeIP.String()) klog.Infof("Adding remote node %q, IP: %s to PMTUD blocking rules", node.Name, nodeIP) - if utilnet.IsIPv4(nodeIP) { + // Only add to 
nftables if this is remote node + if node.Name != nc.name { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv4, + Set: types.NFTRemoteNodeIPsv4, Key: []string{nodeIP.String()}, }) - } else { + } + } + + // Process IPv6 addresses + for _, nodeIP := range ipsv6 { + addrs = append(addrs, nodeIP.String()) + klog.Infof("Adding remote node %q, IP: %s to PMTUD blocking rules", node.Name, nodeIP) + // Only add to nftables if this is remote node + if node.Name != nc.name { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv6, + Set: types.NFTRemoteNodeIPsv6, Key: []string{nodeIP.String()}, }) } @@ -1524,12 +1566,12 @@ func removePMTUDNodeNFTRules(nodeIPs []net.IP) error { // Remove IPs from NFT sets if utilnet.IsIPv4(nodeIP) { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv4, + Set: types.NFTRemoteNodeIPsv4, Key: []string{nodeIP.String()}, }) } else { nftElems = append(nftElems, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv6, + Set: types.NFTRemoteNodeIPsv6, Key: []string{nodeIP.String()}, }) } @@ -1545,18 +1587,18 @@ func removePMTUDNodeNFTRules(nodeIPs []net.IP) error { func (nc *DefaultNodeNetworkController) deleteNode(node *corev1.Node) { gw := nc.Gateway.(*gateway) gw.openflowManager.deleteFlowsByKey(getPMTUDKey(node.Name)) - ipsToRemove := make([]net.IP, 0) - for _, address := range node.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue - } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue - } - ipsToRemove = append(ipsToRemove, nodeIP) + + // Use GetNodeAddresses to get node IPs + ipsv4, ipsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, node) + if err != nil { + klog.Errorf("Failed to get node addresses for node %q: %v", node.Name, err) + return } + ipsToRemove := make([]net.IP, 0, len(ipsv4)+len(ipsv6)) + ipsToRemove = append(ipsToRemove, ipsv4...) 
+ ipsToRemove = append(ipsToRemove, ipsv6...) + klog.Infof("Deleting NFT elements for node: %s", node.Name) if err := removePMTUDNodeNFTRules(ipsToRemove); err != nil { klog.Errorf("Failed to delete nftables rules for PMTUD blocking for node %q: %v", node.Name, err) @@ -1577,33 +1619,34 @@ func (nc *DefaultNodeNetworkController) syncNodes(objs []interface{}) error { if node.Name == nc.name { continue } - for _, address := range node.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue - } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue - } - // Remove IPs from NFT sets - if utilnet.IsIPv4(nodeIP) { - keepNFTSetElemsV4 = append(keepNFTSetElemsV4, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv4, - Key: []string{nodeIP.String()}, - }) - } else { - keepNFTSetElemsV6 = append(keepNFTSetElemsV6, &knftables.Element{ - Set: types.NFTNoPMTUDRemoteNodeIPsv6, - Key: []string{nodeIP.String()}, - }) - } + // Use GetNodeAddresses to get node IPs + ipsv4, ipsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, node) + if err != nil { + klog.Errorf("Failed to get node addresses for node %q: %v", node.Name, err) + continue + } + + // Process IPv4 addresses + for _, nodeIP := range ipsv4 { + keepNFTSetElemsV4 = append(keepNFTSetElemsV4, &knftables.Element{ + Set: types.NFTRemoteNodeIPsv4, + Key: []string{nodeIP.String()}, + }) + } + + // Process IPv6 addresses + for _, nodeIP := range ipsv6 { + keepNFTSetElemsV6 = append(keepNFTSetElemsV6, &knftables.Element{ + Set: types.NFTRemoteNodeIPsv6, + Key: []string{nodeIP.String()}, + }) } } - if err := recreateNFTSet(types.NFTNoPMTUDRemoteNodeIPsv4, keepNFTSetElemsV4); err != nil { + if err := recreateNFTSet(types.NFTRemoteNodeIPsv4, keepNFTSetElemsV4); err != nil { errors = append(errors, err) } - if err := recreateNFTSet(types.NFTNoPMTUDRemoteNodeIPsv6, keepNFTSetElemsV6); err != nil { + if err := recreateNFTSet(types.NFTRemoteNodeIPsv6, keepNFTSetElemsV6); err != 
nil { errors = append(errors, err) } @@ -1655,10 +1698,6 @@ func getPMTUDKey(nodeName string) string { return fmt.Sprintf("%s_pmtud", nodeName) } -func configureSvcRouteViaBridge(routeManager *routemanager.Controller, bridge string) error { - return configureSvcRouteViaInterface(routeManager, bridge, DummyNextHopIPs()) -} - // DummyNextHopIPs returns the fake next hops used for service traffic routing. // It is used in: // - br-ex, where we don't really care about the next hop GW in use as traffic is always routed to OVN diff --git a/go-controller/pkg/node/default_node_network_controller_test.go b/go-controller/pkg/node/default_node_network_controller_test.go index 368b333800..366ee881d6 100644 --- a/go-controller/pkg/node/default_node_network_controller_test.go +++ b/go-controller/pkg/node/default_node_network_controller_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "strings" "sync" "time" @@ -21,6 +22,8 @@ import ( adminpolicybasedrouteclient "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned/fake" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" @@ -35,18 +38,18 @@ import ( const v4PMTUDNFTRules = ` add table inet ovn-kubernetes -add rule inet ovn-kubernetes no-pmtud ip daddr @no-pmtud-remote-node-ips-v4 meta l4proto icmp icmp type 3 icmp code 4 counter drop +add rule inet ovn-kubernetes no-pmtud ip daddr @remote-node-ips-v4 meta l4proto icmp icmp type 3 icmp code 4 counter drop add chain inet ovn-kubernetes no-pmtud { type filter hook output priority 0 ; 
comment "Block egress needs frag/packet too big to remote k8s nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } ` const v6PMTUDNFTRules = ` add table inet ovn-kubernetes -add rule inet ovn-kubernetes no-pmtud meta l4proto icmpv6 icmpv6 type 2 icmpv6 code 0 ip6 daddr @no-pmtud-remote-node-ips-v6 counter drop +add rule inet ovn-kubernetes no-pmtud meta l4proto icmpv6 icmpv6 type 2 icmpv6 code 0 ip6 daddr @remote-node-ips-v6 counter drop add chain inet ovn-kubernetes no-pmtud { type filter hook output priority 0 ; comment "Block egress needs frag/packet too big to remote k8s nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } -add set inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v4 { type ipv4_addr ; comment "Block egress ICMP needs frag to remote Kubernetes nodes" ; } +add set inet ovn-kubernetes remote-node-ips-v6 { type ipv6_addr ; comment "Block egress ICMPv6 packet too big to remote Kubernetes nodes" ; } ` var _ = Describe("Node", func() { @@ -752,6 +755,9 @@ var _ = Describe("Node", func() { node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ 
Addresses: []corev1.NodeAddress{ @@ -766,6 +772,9 @@ var _ = Describe("Node", func() { otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherNodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -803,21 +812,14 @@ var _ = Describe("Node", func() { cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -834,7 +836,7 @@ var _ = Describe("Node", func() { err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v4PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } +add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.254.61 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -864,6 +866,9 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -878,6 +883,9 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 
169.254.254.61 } otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherSubnetNodeIP+"/24"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -915,21 +923,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -946,7 +947,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.254.61 } err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v4PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } +add element inet ovn-kubernetes remote-node-ips-v4 { 169.254.253.61 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1018,6 +1019,9 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1032,6 +1036,9 @@ add element inet ovn-kubernetes 
no-pmtud-remote-node-ips-v4 { 169.254.253.61 } otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherNodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1069,21 +1076,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -1100,7 +1100,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v4 { 169.254.253.61 } err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v6PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } +add element inet ovn-kubernetes remote-node-ips-v6 { 2001:db8:1::4 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1129,6 +1129,9 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } node := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", nodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1143,6 +1146,9 @@ add 
element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } otherNode := corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: remoteNodeName, + Annotations: map[string]string{ + util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", otherSubnetNodeIP+"/64"), + }, }, Status: corev1.NodeStatus{ Addresses: []corev1.NodeAddress{ @@ -1180,21 +1186,14 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) nc.initRetryFrameworkForNode() - err = setupPMTUDNFTSets() + err = setupRemoteNodeNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupPMTUDNFTChain() Expect(err).NotTo(HaveOccurred()) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", - } nc.Gateway = &gateway{ openflowManager: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), }, } @@ -1211,7 +1210,7 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2001:db8:1::4 } err = nc.WatchNodes() Expect(err).NotTo(HaveOccurred()) nftRules := v6PMTUDNFTRules + ` -add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } +add element inet ovn-kubernetes remote-node-ips-v6 { 2002:db8:1::4 } ` err = nodenft.MatchNFTRules(nftRules, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1238,4 +1237,510 @@ add element inet ovn-kubernetes no-pmtud-remote-node-ips-v6 { 2002:db8:1::4 } }) + Describe("node ingress snat exclude subnets", func() { + + var ( + testNS ns.NetNS + nc *DefaultNodeNetworkController + app *cli.App + ) + + const ( + nodeName = "my-node" + ) + + BeforeEach(func() { + var err error + testNS, 
err = testutils.NewNS() + Expect(err).NotTo(HaveOccurred()) + Expect(config.PrepareTestConfig()).To(Succeed()) + + app = cli.NewApp() + app.Name = "test" + app.Flags = config.Flags + }) + + AfterEach(func() { + util.ResetNetLinkOpMockInst() // other tests in this package rely directly on netlink (e.g. gateway_init_linux_test.go) + Expect(testNS.Close()).To(Succeed()) + }) + + Context("with a cluster in IPv4 mode", func() { + const ( + ethName string = "lo1337" + nodeIP string = "169.254.254.60" + ethCIDR string = nodeIP + "/24" + ) + var link netlink.Link + + BeforeEach(func() { + config.IPv4Mode = true + config.IPv6Mode = false + config.Gateway.Mode = config.GatewayModeShared + + // Note we must do this in default netNS because + // nc.WatchNodes() will spawn goroutines which we cannot lock to the testNS + ovntest.AddLink(ethName) + + var err error + link, err = netlink.LinkByName(ethName) + Expect(err).NotTo(HaveOccurred()) + err = netlink.LinkSetUp(link) + Expect(err).NotTo(HaveOccurred()) + + // Add an IP address + addr, err := netlink.ParseAddr(ethCIDR) + Expect(err).NotTo(HaveOccurred()) + addr.Scope = int(netlink.SCOPE_UNIVERSE) + err = netlink.AddrAdd(link, addr) + Expect(err).NotTo(HaveOccurred()) + + }) + + AfterEach(func() { + err := netlink.LinkDel(link) + Expect(err).NotTo(HaveOccurred()) + }) + + ovntest.OnSupportedPlatformsIt("empty annotation on startup", func() { + + app.Action = func(_ *cli.Context) error { + node := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{}, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: nodeIP, + }, + }, + }, + } + + nft := nodenft.SetFakeNFTablesHelper() + + kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{ + Items: []corev1.Node{node}, + }) + fakeClient := &util.OVNNodeClientset{ + KubeClient: kubeFakeClient, + AdminPolicyRouteClient: adminpolicybasedrouteclient.NewSimpleClientset(), + 
NetworkAttchDefClient: nadfake.NewSimpleClientset(), + } + + stop := make(chan struct{}) + wf, err := factory.NewNodeWatchFactory(fakeClient, nodeName) + Expect(err).NotTo(HaveOccurred()) + wg := &sync.WaitGroup{} + defer func() { + close(stop) + wg.Wait() + wf.Shutdown() + }() + + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + routeManager := routemanager.NewController() + cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) + nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) + nc.initRetryFrameworkForNode() + err = setupRemoteNodeNFTSets() + Expect(err).NotTo(HaveOccurred()) + err = setupPMTUDNFTChain() + Expect(err).NotTo(HaveOccurred()) + nc.Gateway = &gateway{ + openflowManager: &openflowManager{ + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + }, + } + + err = managementport.SetupManagementPortNFTSets() + Expect(err).NotTo(HaveOccurred()) + + // must run route manager manually which is usually started with nc.Start() + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + nc.routeManager.Run(stop, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + }() + By("no nftables elements should present at startup") + + err = nc.WatchNodes() + Expect(err).NotTo(HaveOccurred()) + Expect(nft.Dump()).NotTo(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }")) + Expect(nft.Dump()).NotTo(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }")) + + By("adding subnets to node annotation should update nftables elements") + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return 
strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("adding extra subnets to node annotation should update nftables elements") + + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24","fd00::/64","192.169.1.0/24","fd11::/64"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("deleting node should remove nftables elements") + err = kubeFakeClient.CoreV1().Nodes().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + + }).WithTimeout(2 * time.Second).Should(BeTrue()) + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }) + + 
ovntest.OnSupportedPlatformsIt("non-empty annotation on startup", func() { + + app.Action = func(_ *cli.Context) error { + node := corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Annotations: map[string]string{ + util.OvnNodeDontSNATSubnets: `["192.168.1.0/24","fd00::/64"]`, + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: nodeIP, + }, + }, + }, + } + + nft := nodenft.SetFakeNFTablesHelper() + + kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{ + Items: []corev1.Node{node}, + }) + fakeClient := &util.OVNNodeClientset{ + KubeClient: kubeFakeClient, + AdminPolicyRouteClient: adminpolicybasedrouteclient.NewSimpleClientset(), + NetworkAttchDefClient: nadfake.NewSimpleClientset(), + } + + stop := make(chan struct{}) + wf, err := factory.NewNodeWatchFactory(fakeClient, nodeName) + Expect(err).NotTo(HaveOccurred()) + wg := &sync.WaitGroup{} + defer func() { + close(stop) + wg.Wait() + wf.Shutdown() + }() + + err = wf.Start() + Expect(err).NotTo(HaveOccurred()) + routeManager := routemanager.NewController() + cnnci := NewCommonNodeNetworkControllerInfo(kubeFakeClient, fakeClient.AdminPolicyRouteClient, wf, nil, nodeName, routeManager) + nc = newDefaultNodeNetworkController(cnnci, stop, wg, routeManager, nil) + nc.initRetryFrameworkForNode() + err = setupRemoteNodeNFTSets() + Expect(err).NotTo(HaveOccurred()) + err = setupPMTUDNFTChain() + Expect(err).NotTo(HaveOccurred()) + nc.Gateway = &gateway{ + openflowManager: &openflowManager{ + flowCache: map[string][]string{}, + defaultBridge: bridgeconfig.TestDefaultBridgeConfig(), + }, + } + + err = managementport.SetupManagementPortNFTSets() + Expect(err).NotTo(HaveOccurred()) + + // must run route manager manually which is usually started with nc.Start() + wg.Add(1) + go func() { + defer GinkgoRecover() + defer wg.Done() + nc.routeManager.Run(stop, 10*time.Second) + Expect(err).NotTo(HaveOccurred()) + }() + By("expected nftables 
elements should present at startup") + + err = nc.WatchNodes() + Expect(err).NotTo(HaveOccurred()) + Expect(nft.Dump()).To(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }")) + Expect(nft.Dump()).To(ContainSubstring("add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }")) + + By("editing subnets on node annotation should update nftables elements") + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.168.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("adding extra subnets to node annotation should update nftables elements") + + node.Annotations[util.OvnNodeDontSNATSubnets] = `["192.167.1.0/24","fd00::/64","192.169.1.0/24","fd11::/64"]` + + _, err = kubeFakeClient.CoreV1().Nodes().Update(context.TODO(), &node, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + }).WithTimeout(2 * time.Second).Should(BeTrue()) + + By("deleting node should remove nftables elements") + err = 
kubeFakeClient.CoreV1().Nodes().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}) + Expect(err).NotTo(HaveOccurred()) + + Eventually(func() bool { + cleanDump := strings.ReplaceAll(nft.Dump(), "\r", "") + return !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.167.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { 192.169.1.0/24 }") && + !strings.Contains(cleanDump, "add element inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { fd00::/64 }") + + }).WithTimeout(2 * time.Second).Should(BeTrue()) + return nil + } + + err := app.Run([]string{app.Name}) + Expect(err).NotTo(HaveOccurred()) + }) + + }) + + Describe("derive-from-mgmt-port gateway interface resolution", func() { + var ( + kubeMock *mocks.Interface + sriovnetMock utilMocks.SriovnetOps + netlinkOpsMock *utilMocks.NetLinkOps + netlinkLinkMock *netlink_mocks.Link + ) + + const ( + nodeName = "test-node" + mgmtPortNetdev = "pf0vf0" + vfPciAddr = "0000:01:02.3" + pfPciAddr = "0000:01:00.0" + expectedGatewayIntf = "eth0" + ) + + BeforeEach(func() { + kubeMock = new(mocks.Interface) + sriovnetMock = utilMocks.SriovnetOps{} + netlinkOpsMock = new(utilMocks.NetLinkOps) + netlinkLinkMock = new(netlink_mocks.Link) + + util.SetSriovnetOpsInst(&sriovnetMock) + util.SetNetLinkOpMockInst(netlinkOpsMock) + + // Setup default node network controller + cnnci := &CommonNodeNetworkControllerInfo{ + name: nodeName, + Kube: kubeMock, + } + nc = &DefaultNodeNetworkController{ + BaseNodeNetworkController: BaseNodeNetworkController{ + CommonNodeNetworkControllerInfo: *cnnci, + ReconcilableNetInfo: &util.DefaultNetInfo{}, + }, + } + + // Set DPU host mode + config.OvnKubeNode.Mode = types.NodeModeDPUHost + config.OvnKubeNode.MgmtPortNetdev = mgmtPortNetdev + config.Gateway.Interface = types.DeriveFromMgmtPort + }) + + AfterEach(func() { + util.ResetNetLinkOpMockInst() + }) + + Context("when gateway interface is set to 
derive-from-mgmt-port", func() { + It("should resolve gateway interface from PCI address successfully", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return PF PCI address + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return(pfPciAddr, nil) + + // Mock GetNetDevicesFromPci to return available network devices + sriovnetMock.On("GetNetDevicesFromPci", pfPciAddr).Return([]string{expectedGatewayIntf, "eth1"}, nil) + + // Execute the gateway interface resolution logic + // This simulates the logic in the Start() method + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + Expect(netdevName).To(Equal(mgmtPortNetdev)) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + Expect(pciAddr).To(Equal(vfPciAddr)) + + pfPciAddr, err := util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).NotTo(HaveOccurred()) + Expect(pfPciAddr).To(Equal(pfPciAddr)) + + netdevs, err := util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + Expect(err).NotTo(HaveOccurred()) + Expect(netdevs).To(HaveLen(2)) + Expect(netdevs[0]).To(Equal(expectedGatewayIntf)) + + // Verify that the first device is selected as the gateway interface + selectedNetdev := netdevs[0] + Expect(selectedNetdev).To(Equal(expectedGatewayIntf)) + }) + + It("should return error when no network devices found for PCI address", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + 
netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return PF PCI address + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return(pfPciAddr, nil) + + // Mock GetNetDevicesFromPci to return empty list + sriovnetMock.On("GetNetDevicesFromPci", pfPciAddr).Return([]string{}, nil) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + + pfPciAddr, err := util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).NotTo(HaveOccurred()) + + netdevs, err := util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + Expect(err).NotTo(HaveOccurred()) + Expect(netdevs).To(BeEmpty()) + + // This should result in an error when no devices are found + Expect(netdevs).To(BeEmpty()) + }) + + It("should return error when GetPciFromNetDevice fails", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return error + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return("", fmt.Errorf("failed to get PCI address")) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + _, err = util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get PCI address")) + }) + + It("should return error when GetPfPciFromVfPci fails", func() { + // Mock 
getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return error + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return("", fmt.Errorf("failed to get PF PCI address")) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + + _, err = util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get PF PCI address")) + }) + + It("should return error when GetNetDevicesFromPci fails", func() { + // Mock getManagementPortNetDev to return the management port device + netlinkOpsMock.On("LinkByName", mgmtPortNetdev).Return(netlinkLinkMock, nil) + netlinkLinkMock.On("Attrs").Return(&netlink.LinkAttrs{ + Name: mgmtPortNetdev, + }) + + // Mock GetPciFromNetDevice to return VF PCI address + sriovnetMock.On("GetPciFromNetDevice", mgmtPortNetdev).Return(vfPciAddr, nil) + + // Mock GetPfPciFromVfPci to return PF PCI address + sriovnetMock.On("GetPfPciFromVfPci", vfPciAddr).Return(pfPciAddr, nil) + + // Mock GetNetDevicesFromPci to return error + sriovnetMock.On("GetNetDevicesFromPci", pfPciAddr).Return(nil, fmt.Errorf("failed to get network devices")) + + // Execute the gateway interface resolution logic + netdevName, err := getManagementPortNetDev(config.OvnKubeNode.MgmtPortNetdev) + Expect(err).NotTo(HaveOccurred()) + + pciAddr, err := util.GetSriovnetOps().GetPciFromNetDevice(netdevName) + Expect(err).NotTo(HaveOccurred()) + + pfPciAddr, err 
:= util.GetSriovnetOps().GetPfPciFromVfPci(pciAddr) + Expect(err).NotTo(HaveOccurred()) + + _, err = util.GetSriovnetOps().GetNetDevicesFromPci(pfPciAddr) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to get network devices")) + }) + }) + }) + }) }) diff --git a/go-controller/pkg/node/egress_service_test.go b/go-controller/pkg/node/egress_service_test.go index bb4e57f5ca..ca44ac311d 100644 --- a/go-controller/pkg/node/egress_service_test.go +++ b/go-controller/pkg/node/egress_service_test.go @@ -19,6 +19,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/controllers/egressservice" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/mocks" @@ -299,7 +300,7 @@ var _ = Describe("Egress Service Operations", func() { c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -405,7 +406,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.4 comment "nam c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -610,7 +611,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.3 comment "nam c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -805,7 +806,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.11 comment "na c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, 
+ nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), @@ -964,7 +965,7 @@ add element inet ovn-kubernetes egress-service-snat-v4 { 10.128.0.11 comment "na c, err := egressservice.NewController( stopChan, - ovnKubeNodeSNATMark, + nodetypes.OvnKubeNodeSNATMark, "node", wf.EgressServiceInformer(), wf.ServiceInformer(), diff --git a/go-controller/pkg/node/gateway_egressip.go b/go-controller/pkg/node/egressip/gateway_egressip.go similarity index 91% rename from go-controller/pkg/node/gateway_egressip.go rename to go-controller/pkg/node/egressip/gateway_egressip.go index 13e41c4542..38bd2b058e 100644 --- a/go-controller/pkg/node/gateway_egressip.go +++ b/go-controller/pkg/node/egressip/gateway_egressip.go @@ -1,4 +1,4 @@ -package node +package egressip import ( "encoding/json" @@ -75,15 +75,15 @@ func (e markIPs) containsIP(ip net.IP) bool { return false } -type markIPsCache struct { +type MarkIPsCache struct { mu sync.Mutex hasSyncOnce bool markToIPs markIPs IPToMark map[string]int } -func newMarkIPsCache() *markIPsCache { - return &markIPsCache{ +func NewMarkIPsCache() *MarkIPsCache { + return &MarkIPsCache{ mu: sync.Mutex{}, markToIPs: markIPs{ v4: make(map[int]string), @@ -93,7 +93,7 @@ func newMarkIPsCache() *markIPsCache { } } -func (mic *markIPsCache) IsIPPresent(ip net.IP) bool { +func (mic *MarkIPsCache) IsIPPresent(ip net.IP) bool { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -103,7 +103,7 @@ func (mic *markIPsCache) IsIPPresent(ip net.IP) bool { return isFound } -func (mic *markIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { +func (mic *MarkIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -113,7 +113,7 @@ func (mic *markIPsCache) insertMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.IPToMark[ip.String()] = pktMark.ToInt() } -func (mic *markIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { +func (mic *MarkIPsCache) 
deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { mic.mu.Lock() defer mic.mu.Unlock() if ip == nil { @@ -123,7 +123,7 @@ func (mic *markIPsCache) deleteMarkIP(pktMark util.EgressIPMark, ip net.IP) { delete(mic.IPToMark, ip.String()) } -func (mic *markIPsCache) replaceAll(markIPs markIPs) { +func (mic *MarkIPsCache) replaceAll(markIPs markIPs) { mic.mu.Lock() mic.markToIPs = markIPs for mark, ipv4 := range markIPs.v4 { @@ -135,7 +135,7 @@ func (mic *markIPsCache) replaceAll(markIPs markIPs) { mic.mu.Unlock() } -func (mic *markIPsCache) GetIPv4() map[int]string { +func (mic *MarkIPsCache) GetIPv4() map[int]string { mic.mu.Lock() defer mic.mu.Unlock() dupe := make(map[int]string) @@ -148,7 +148,7 @@ func (mic *markIPsCache) GetIPv4() map[int]string { return dupe } -func (mic *markIPsCache) GetIPv6() map[int]string { +func (mic *MarkIPsCache) GetIPv6() map[int]string { mic.mu.Lock() defer mic.mu.Unlock() dupe := make(map[int]string) @@ -161,19 +161,19 @@ func (mic *markIPsCache) GetIPv6() map[int]string { return dupe } -func (mic *markIPsCache) HasSyncdOnce() bool { +func (mic *MarkIPsCache) HasSyncdOnce() bool { mic.mu.Lock() defer mic.mu.Unlock() return mic.hasSyncOnce } -func (mic *markIPsCache) setSyncdOnce() { +func (mic *MarkIPsCache) setSyncdOnce() { mic.mu.Lock() mic.hasSyncOnce = true mic.mu.Unlock() } -type bridgeEIPAddrManager struct { +type BridgeEIPAddrManager struct { nodeName string bridgeName string nodeAnnotationMu sync.Mutex @@ -182,18 +182,18 @@ type bridgeEIPAddrManager struct { nodeLister corev1listers.NodeLister kube kube.Interface addrManager *linkmanager.Controller - cache *markIPsCache + cache *MarkIPsCache } -// newBridgeEIPAddrManager manages EgressIP IPs that must be added to ovs bridges to support EgressIP feature for user +// NewBridgeEIPAddrManager manages EgressIP IPs that must be added to ovs bridges to support EgressIP feature for user // defined networks. 
It saves the assigned IPs to its respective Node annotation in-order to understand which IPs it assigned // prior to restarting. // It provides the assigned IPs info node IP handler. Node IP handler must not consider assigned EgressIP IPs as possible node IPs. // Openflow manager must generate the SNAT openflow conditional on packet marks and therefore needs access to EIP IPs and associated packet marks. -// bridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. -func newBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, - kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *bridgeEIPAddrManager { - return &bridgeEIPAddrManager{ +// BridgeEIPAddrManager must be able to force Openflow manager to resync if EgressIP assignment for the node changes. +func NewBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanager.Controller, + kube kube.Interface, eIPInformer egressipinformers.EgressIPInformer, nodeInformer corev1informers.NodeInformer) *BridgeEIPAddrManager { + return &BridgeEIPAddrManager{ nodeName: nodeName, // k8 node name bridgeName: bridgeName, // bridge name for which EIP IPs are managed nodeAnnotationMu: sync.Mutex{}, // mu for updating Node annotation @@ -202,15 +202,15 @@ func newBridgeEIPAddrManager(nodeName, bridgeName string, linkManager *linkmanag nodeLister: nodeInformer.Lister(), kube: kube, addrManager: linkManager, - cache: newMarkIPsCache(), // cache to store pkt mark -> EIP IP. + cache: NewMarkIPsCache(), // cache to store pkt mark -> EIP IP. 
} } -func (g *bridgeEIPAddrManager) GetCache() *markIPsCache { +func (g *BridgeEIPAddrManager) GetCache() *MarkIPsCache { return g.cache } -func (g *bridgeEIPAddrManager) addEgressIP(eip *egressipv1.EgressIP) (bool, error) { +func (g *BridgeEIPAddrManager) AddEgressIP(eip *egressipv1.EgressIP) (bool, error) { var isUpdated bool if !util.IsEgressIPMarkSet(eip.Annotations) { return isUpdated, nil @@ -237,7 +237,7 @@ func (g *bridgeEIPAddrManager) addEgressIP(eip *egressipv1.EgressIP) (bool, erro return isUpdated, nil } -func (g *bridgeEIPAddrManager) updateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { +func (g *BridgeEIPAddrManager) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) (bool, error) { var isUpdated bool // at most, one status item for this node will be found. for _, oldStatus := range oldEIP.Status.Items { @@ -293,7 +293,7 @@ func (g *bridgeEIPAddrManager) updateEgressIP(oldEIP, newEIP *egressipv1.EgressI return isUpdated, nil } -func (g *bridgeEIPAddrManager) deleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { +func (g *BridgeEIPAddrManager) DeleteEgressIP(eip *egressipv1.EgressIP) (bool, error) { var isUpdated bool if !util.IsEgressIPMarkSet(eip.Annotations) { return isUpdated, nil @@ -322,7 +322,7 @@ func (g *bridgeEIPAddrManager) deleteEgressIP(eip *egressipv1.EgressIP) (bool, e return isUpdated, nil } -func (g *bridgeEIPAddrManager) syncEgressIP(objs []interface{}) error { +func (g *BridgeEIPAddrManager) SyncEgressIP(objs []interface{}) error { // caller must synchronise annotIPs, err := g.getAnnotationIPs() if err != nil { @@ -380,7 +380,7 @@ func (g *bridgeEIPAddrManager) syncEgressIP(objs []interface{}) error { // addIPToAnnotation adds an address to the collection of existing addresses stored in the nodes annotation. Caller // may repeat addition of addresses without care for duplicate addresses being added. 
-func (g *bridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { +func (g *BridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { g.nodeAnnotationMu.Lock() defer g.nodeAnnotationMu.Unlock() return retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -412,7 +412,7 @@ func (g *bridgeEIPAddrManager) addIPToAnnotation(candidateIP net.IP) error { // deleteIPsFromAnnotation deletes address from annotation. If multiple users, callers must synchronise. // deletion of address that doesn't exist will not cause an error. -func (g *bridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { +func (g *BridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) error { g.nodeAnnotationMu.Lock() defer g.nodeAnnotationMu.Unlock() return retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -446,7 +446,7 @@ func (g *bridgeEIPAddrManager) deleteIPsFromAnnotation(candidateIPs ...net.IP) e }) } -func (g *bridgeEIPAddrManager) addIPBridge(ip net.IP) error { +func (g *BridgeEIPAddrManager) addIPBridge(ip net.IP) error { link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) if err != nil { return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) @@ -454,7 +454,7 @@ func (g *bridgeEIPAddrManager) addIPBridge(ip net.IP) error { return g.addrManager.AddAddress(getEIPBridgeNetlinkAddress(ip, link.Attrs().Index)) } -func (g *bridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { +func (g *BridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { link, err := util.GetNetLinkOps().LinkByName(g.bridgeName) if err != nil { return fmt.Errorf("failed to get link obj by name %s: %v", g.bridgeName, err) @@ -464,7 +464,7 @@ func (g *bridgeEIPAddrManager) deleteIPBridge(ip net.IP) error { // getAnnotationIPs retrieves the egress IP annotation from the current node Nodes object. If multiple users, callers must synchronise. 
// if annotation isn't present, empty set is returned -func (g *bridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { +func (g *BridgeEIPAddrManager) getAnnotationIPs() ([]net.IP, error) { node, err := g.nodeLister.Get(g.nodeName) if err != nil { return nil, fmt.Errorf("failed to get node %s from lister: %v", g.nodeName, err) diff --git a/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go b/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go new file mode 100644 index 0000000000..d9d627c882 --- /dev/null +++ b/go-controller/pkg/node/egressip/gateway_egressip_suite_test.go @@ -0,0 +1,13 @@ +package egressip + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestNodeSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node Gateway EgressIP Suite") +} diff --git a/go-controller/pkg/node/gateway_egressip_test.go b/go-controller/pkg/node/egressip/gateway_egressip_test.go similarity index 92% rename from go-controller/pkg/node/gateway_egressip_test.go rename to go-controller/pkg/node/egressip/gateway_egressip_test.go index bd09738200..07a03a87b6 100644 --- a/go-controller/pkg/node/gateway_egressip_test.go +++ b/go-controller/pkg/node/egressip/gateway_egressip_test.go @@ -1,4 +1,4 @@ -package node +package egressip import ( "fmt" @@ -67,10 +67,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") 
gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -82,10 +82,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, "", ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -97,10 +97,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, "not-an-integer", ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).Should(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -117,10 +117,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) 
- isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -140,10 +140,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) + isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -162,13 +162,13 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() assignedEIP := getEIPAssignedToNode(nodeName, mark, ipV4Addr) unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP) + isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.updateEgressIP(assignedEIP, unassignedEIP) + isUpdated, err = 
addrMgr.UpdateEgressIP(assignedEIP, unassignedEIP) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -191,13 +191,13 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { unassignedEIP := getEIPNotAssignedToNode(mark, ipV4Addr) assignedEIP1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) assignedEIP2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) - isUpdated, err := addrMgr.updateEgressIP(unassignedEIP, assignedEIP1) + isUpdated, err := addrMgr.UpdateEgressIP(unassignedEIP, assignedEIP1) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.updateEgressIP(assignedEIP1, assignedEIP2) + isUpdated, err = addrMgr.UpdateEgressIP(assignedEIP1, assignedEIP2) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -221,13 +221,13 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) defer stopFn() eip := getEIPAssignedToNode(nodeName, mark, ipV4Addr) - isUpdated, err := addrMgr.addEgressIP(eip) + isUpdated, err := 
addrMgr.AddEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - isUpdated, err = addrMgr.deleteEgressIP(eip) + isUpdated, err = addrMgr.DeleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeTrue()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).ShouldNot(gomega.ConsistOf(ipV4Addr)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -240,10 +240,10 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, generateAnnotFromIPs(ipV4Addr2)) defer stopFn() eip := getEIPNotAssignedToNode(mark, ipV4Addr) - isUpdated, err := addrMgr.deleteEgressIP(eip) + isUpdated, err := addrMgr.DeleteEgressIP(eip) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process a valid EgressIP") gomega.Expect(isUpdated).Should(gomega.BeFalse()) - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr2)) gomega.Expect(nlMock.AssertNotCalled(ginkgo.GinkgoT(), "AddrDel", nlLinkMock, @@ -265,9 +265,9 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) eipUnassigned3 := getEIPNotAssignedToNode(mark3, ipV4Addr3) - err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) + err := addrMgr.SyncEgressIP([]interface{}{eipAssigned1, eipAssigned2, eipUnassigned3}) 
gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -289,9 +289,9 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { defer stopFn() eipAssigned1 := getEIPAssignedToNode(nodeName, mark, ipV4Addr) eipAssigned2 := getEIPAssignedToNode(nodeName, mark2, ipV4Addr2) - err := addrMgr.syncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) + err := addrMgr.SyncEgressIP([]interface{}{eipAssigned1, eipAssigned2}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.ConsistOf(ipV4Addr, ipV4Addr2)) gomega.Expect(nlMock.AssertCalled(ginkgo.GinkgoT(), "AddrAdd", nlLinkMock, @@ -306,16 +306,16 @@ var _ = ginkgo.Describe("Gateway EgressIP", func() { addrMgr, stopFn := initBridgeEIPAddrManager(nodeName, bridgeName, emptyAnnotation) // previously configured IP defer stopFn() eipAssigned := getEIPAssignedToNode(nodeName, "", ipV4Addr) - err := addrMgr.syncEgressIP([]interface{}{eipAssigned}) + err := addrMgr.SyncEgressIP([]interface{}{eipAssigned}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "should process valid EgressIPs") - node, err := addrMgr.kube.GetNode(nodeName) + node, err := addrMgr.nodeLister.Get(nodeName) gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "node should be present within kapi") gomega.Expect(parseEIPsFromAnnotation(node)).Should(gomega.BeEmpty()) }) }) }) -func 
initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*bridgeEIPAddrManager, func()) { +func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string) (*BridgeEIPAddrManager, func()) { node := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{Name: nodeName, Annotations: map[string]string{}}, } @@ -327,7 +327,7 @@ func initBridgeEIPAddrManager(nodeName, bridgeName string, bridgeEIPAnnot string gomega.Expect(watchFactory.Start()).Should(gomega.Succeed(), "watch factory should start") gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "watch factory creation must succeed") linkManager := linkmanager.NewController(nodeName, true, true, nil) - return newBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), + return NewBridgeEIPAddrManager(nodeName, bridgeName, linkManager, &kube.Kube{KClient: client}, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()), watchFactory.Shutdown } diff --git a/go-controller/pkg/node/gateway.go b/go-controller/pkg/node/gateway.go index 1b4544f89b..fa812377e7 100644 --- a/go-controller/pkg/node/gateway.go +++ b/go-controller/pkg/node/gateway.go @@ -17,6 +17,8 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/informer" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -49,7 +51,7 @@ type gateway struct { nodePortWatcher informer.ServiceAndEndpointsEventHandler openflowManager *openflowManager nodeIPManager *addressManager - bridgeEIPAddrManager *bridgeEIPAddrManager + bridgeEIPAddrManager 
*egressip.BridgeEIPAddrManager initFunc func() error readyFunc func() (bool, error) @@ -58,6 +60,8 @@ type gateway struct { watchFactory *factory.WatchFactory // used for retry stopChan <-chan struct{} wg *sync.WaitGroup + + nextHops []net.IP } func (g *gateway) AddService(svc *corev1.Service) error { @@ -233,7 +237,7 @@ func (g *gateway) AddEgressIP(eip *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.addEgressIP(eip) + isSyncRequired, err := g.bridgeEIPAddrManager.AddEgressIP(eip) if err != nil { return err } @@ -249,7 +253,7 @@ func (g *gateway) UpdateEgressIP(oldEIP, newEIP *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.updateEgressIP(oldEIP, newEIP) + isSyncRequired, err := g.bridgeEIPAddrManager.UpdateEgressIP(oldEIP, newEIP) if err != nil { return err } @@ -265,7 +269,7 @@ func (g *gateway) DeleteEgressIP(eip *egressipv1.EgressIP) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - isSyncRequired, err := g.bridgeEIPAddrManager.deleteEgressIP(eip) + isSyncRequired, err := g.bridgeEIPAddrManager.DeleteEgressIP(eip) if err != nil { return err } @@ -281,7 +285,7 @@ func (g *gateway) SyncEgressIP(eips []interface{}) error { if !util.IsNetworkSegmentationSupportEnabled() || !config.OVNKubernetesFeature.EnableInterconnect || config.Gateway.Mode == config.GatewayModeDisabled { return nil } - if err := g.bridgeEIPAddrManager.syncEgressIP(eips); err != nil { + if err := g.bridgeEIPAddrManager.SyncEgressIP(eips); err != nil { return err } if err := g.Reconcile(); 
err != nil { @@ -354,14 +358,14 @@ func setupUDPAggregationUplink(ifname string) error { func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops []net.IP, nodeSubnets, gwIPs []*net.IPNet, advertised bool, nodeAnnotator kube.Annotator) ( - *bridgeConfiguration, *bridgeConfiguration, error) { - gatewayBridge, err := bridgeForInterface(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, gwNextHops, advertised) + *bridgeconfig.BridgeConfiguration, *bridgeconfig.BridgeConfiguration, error) { + gatewayBridge, err := bridgeconfig.NewBridgeConfiguration(gwIntf, nodeName, types.PhysicalNetworkName, nodeSubnets, gwIPs, advertised) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", gwIntf, err) } - var egressGWBridge *bridgeConfiguration + var egressGWBridge *bridgeconfig.BridgeConfiguration if egressGatewayIntf != "" { - egressGWBridge, err = bridgeForInterface(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, nil, false) + egressGWBridge, err = bridgeconfig.NewBridgeConfiguration(egressGatewayIntf, nodeName, types.PhysicalNetworkExGwName, nodeSubnets, nil, false) if err != nil { return nil, nil, fmt.Errorf("bridge for interface failed for %s: %w", egressGatewayIntf, err) } @@ -380,7 +384,7 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops "IP fragmentation or large TCP/UDP payloads may not be forwarded correctly.") enableGatewayMTU = false } else { - chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.bridgeName) + chkPktLengthSupported, err := util.DetectCheckPktLengthSupport(gatewayBridge.GetBridgeName()) if err != nil { return nil, nil, err } @@ -414,9 +418,9 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } if config.Default.EnableUDPAggregation { - err = setupUDPAggregationUplink(gatewayBridge.uplinkName) + err = setupUDPAggregationUplink(gatewayBridge.GetUplinkName()) if err == 
nil && egressGWBridge != nil { - err = setupUDPAggregationUplink(egressGWBridge.uplinkName) + err = setupUDPAggregationUplink(egressGWBridge.GetUplinkName()) } if err != nil { klog.Warningf("Could not enable UDP packet aggregation on uplink interface (aggregation will be disabled): %v", err) @@ -424,48 +428,38 @@ func gatewayInitInternal(nodeName, gwIntf, egressGatewayIntf string, gwNextHops } } + // Set static FDB entry for LOCAL port + if err := util.SetStaticFDBEntry(gatewayBridge.GetBridgeName(), gatewayBridge.GetBridgeName(), gatewayBridge.GetMAC()); err != nil { + return nil, nil, err + } + l3GwConfig := util.L3GatewayConfig{ Mode: config.Gateway.Mode, ChassisID: chassisID, - BridgeID: gatewayBridge.bridgeName, - InterfaceID: gatewayBridge.interfaceID, - MACAddress: gatewayBridge.macAddress, - IPAddresses: gatewayBridge.ips, + BridgeID: gatewayBridge.GetBridgeName(), + InterfaceID: gatewayBridge.GetInterfaceID(), + MACAddress: gatewayBridge.GetMAC(), + IPAddresses: gatewayBridge.GetIPs(), NextHops: gwNextHops, NodePortEnable: config.Gateway.NodeportEnable, VLANID: &config.Gateway.VLANID, } if egressGWBridge != nil { - l3GwConfig.EgressGWInterfaceID = egressGWBridge.interfaceID - l3GwConfig.EgressGWMACAddress = egressGWBridge.macAddress - l3GwConfig.EgressGWIPAddresses = egressGWBridge.ips + l3GwConfig.EgressGWInterfaceID = egressGWBridge.GetInterfaceID() + l3GwConfig.EgressGWMACAddress = egressGWBridge.GetMAC() + l3GwConfig.EgressGWIPAddresses = egressGWBridge.GetIPs() } err = util.SetL3GatewayConfig(nodeAnnotator, &l3GwConfig) return gatewayBridge, egressGWBridge, err } -func gatewayReady(patchPort string) (bool, error) { - // Get ofport of patchPort - ofport, _, err := util.GetOVSOfPort("--if-exists", "get", "interface", patchPort, "ofport") - if err != nil || len(ofport) == 0 { - return false, nil - } - klog.Info("Gateway is ready") - return true, nil -} - func (g *gateway) GetGatewayBridgeIface() string { return g.openflowManager.getDefaultBridgeName() } 
func (g *gateway) GetGatewayIface() string { - return g.openflowManager.defaultBridge.getGatewayIface() -} - -// getMaxFrameLength returns the maximum frame size (ignoring VLAN header) that a gateway can handle -func getMaxFrameLength() int { - return config.Default.MTU + 14 + return g.openflowManager.defaultBridge.GetGatewayIface() } // SetDefaultGatewayBridgeMAC updates the mac address for the OFM used to render flows with @@ -475,11 +469,11 @@ func (g *gateway) SetDefaultGatewayBridgeMAC(macAddr net.HardwareAddr) { } func (g *gateway) SetDefaultPodNetworkAdvertised(isPodNetworkAdvertised bool) { - g.openflowManager.defaultBridge.netConfig[types.DefaultNetworkName].advertised.Store(isPodNetworkAdvertised) + g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Store(isPodNetworkAdvertised) } func (g *gateway) GetDefaultPodNetworkAdvertised() bool { - return g.openflowManager.defaultBridge.netConfig[types.DefaultNetworkName].advertised.Load() + return g.openflowManager.defaultBridge.GetNetworkConfig(types.DefaultNetworkName).Advertised.Load() } // Reconcile handles triggering updates to different components of a gateway, like OFM, Services @@ -527,216 +521,9 @@ func (g *gateway) addAllServices() []error { func (g *gateway) updateSNATRules() error { subnets := util.IPsToNetworkIPs(g.nodeIPManager.mgmtPort.GetAddresses()...) - if g.GetDefaultPodNetworkAdvertised() || config.Gateway.Mode != config.GatewayModeLocal { - return delLocalGatewayPodSubnetNATRules(subnets...) - } - - return addLocalGatewayPodSubnetNATRules(subnets...) 
-} - -type bridgeConfiguration struct { - sync.Mutex - nodeName string - bridgeName string - uplinkName string - gwIface string - gwIfaceRep string - ips []*net.IPNet - interfaceID string - macAddress net.HardwareAddr - ofPortPhys string - ofPortHost string - netConfig map[string]*bridgeUDNConfiguration - eipMarkIPs *markIPsCache - nextHops []net.IP -} - -func (b *bridgeConfiguration) getGatewayIface() string { - // If gwIface is set, then accelerated GW interface is present and we use it. If else use external bridge instead. - if b.gwIface != "" { - return b.gwIface - } - return b.bridgeName -} - -// updateInterfaceIPAddresses sets and returns the bridge's current ips -func (b *bridgeConfiguration) updateInterfaceIPAddresses(node *corev1.Node) ([]*net.IPNet, error) { - b.Lock() - defer b.Unlock() - ifAddrs, err := getNetworkInterfaceIPAddresses(b.getGatewayIface()) - if err != nil { - return nil, err - } - - // For DPU, here we need to use the DPU host's IP address which is the tenant cluster's - // host internal IP address instead of the DPU's external bridge IP address. - if config.OvnKubeNode.Mode == types.NodeModeDPU { - nodeAddrStr, err := util.GetNodePrimaryIP(node) - if err != nil { - return nil, err - } - nodeAddr := net.ParseIP(nodeAddrStr) - if nodeAddr == nil { - return nil, fmt.Errorf("failed to parse node IP address. 
%v", nodeAddrStr) - } - ifAddrs, err = getDPUHostPrimaryIPAddresses(nodeAddr, ifAddrs) - if err != nil { - return nil, err - } - } - - b.ips = ifAddrs - return ifAddrs, nil -} - -func bridgeForInterface(intfName, nodeName, - physicalNetworkName string, - nodeSubnets, gwIPs []*net.IPNet, - gwNextHops []net.IP, - advertised bool) (*bridgeConfiguration, error) { - var intfRep string - var err error - isGWAcclInterface := false - gwIntf := intfName - - defaultNetConfig := &bridgeUDNConfiguration{ - masqCTMark: ctMarkOVN, - subnets: config.Default.ClusterSubnets, - nodeSubnets: nodeSubnets, - } - res := bridgeConfiguration{ - nodeName: nodeName, - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - eipMarkIPs: newMarkIPsCache(), - } - if len(gwNextHops) > 0 { - res.nextHops = gwNextHops - } - res.netConfig[types.DefaultNetworkName].advertised.Store(advertised) - - if config.Gateway.GatewayAcceleratedInterface != "" { - // Try to get representor for the specified gateway device. - // If function succeeds, then it is either a valid switchdev VF or SF, and we can use this accelerated device - // for node IP, Host Ofport for Openflow etc. 
- // If failed - error for improper configuration option - intfRep, err = getRepresentor(config.Gateway.GatewayAcceleratedInterface) - if err != nil { - return nil, fmt.Errorf("gateway accelerated interface %s is not valid: %w", config.Gateway.GatewayAcceleratedInterface, err) - } - gwIntf = config.Gateway.GatewayAcceleratedInterface - isGWAcclInterface = true - klog.Infof("For gateway accelerated interface %s representor: %s", config.Gateway.GatewayAcceleratedInterface, intfRep) - } else { - intfRep, err = getRepresentor(gwIntf) - if err == nil { - isGWAcclInterface = true - } - } - - if isGWAcclInterface { - bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfRep) - if err != nil { - return nil, fmt.Errorf("failed to find bridge that has port %s: %w", intfRep, err) - } - link, err := util.GetNetLinkOps().LinkByName(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get netdevice link for %s: %w", gwIntf, err) - } - uplinkName, err := util.GetNicName(bridgeName) - if err != nil { - return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) - } - res.bridgeName = bridgeName - res.uplinkName = uplinkName - res.gwIfaceRep = intfRep - res.gwIface = gwIntf - res.macAddress = link.Attrs().HardwareAddr - } else if bridgeName, _, err := util.RunOVSVsctl("port-to-br", intfName); err == nil { - // This is an OVS bridge's internal port - uplinkName, err := util.GetNicName(bridgeName) - if err != nil { - return nil, fmt.Errorf("failed to find nic name for bridge %s: %w", bridgeName, err) - } - res.bridgeName = bridgeName - res.gwIface = bridgeName - res.uplinkName = uplinkName - gwIntf = bridgeName - } else if _, _, err := util.RunOVSVsctl("br-exists", intfName); err != nil { - // This is not a OVS bridge. We need to create a OVS bridge - // and add cluster.GatewayIntf as a port of that bridge. 
- bridgeName, err := util.NicToBridge(intfName) - if err != nil { - return nil, fmt.Errorf("nicToBridge failed for %s: %w", intfName, err) - } - res.bridgeName = bridgeName - res.gwIface = bridgeName - res.uplinkName = intfName - gwIntf = bridgeName - } else { - // gateway interface is an OVS bridge - uplinkName, err := getIntfName(intfName) - if err != nil { - if config.Gateway.Mode == config.GatewayModeLocal && config.Gateway.AllowNoUplink { - klog.Infof("Could not find uplink for %s, setup gateway bridge with no uplink port, egress IP and egress GW will not work", intfName) - } else { - return nil, fmt.Errorf("failed to find intfName for %s: %w", intfName, err) - } - } else { - res.uplinkName = uplinkName - } - res.bridgeName = intfName - res.gwIface = intfName - } - // Now, we get IP addresses for the bridge - if len(gwIPs) > 0 { - // use gwIPs if provided - res.ips = gwIPs - } else { - // get IP addresses from OVS bridge. If IP does not exist, - // error out. - res.ips, err = getNetworkInterfaceIPAddresses(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get interface details for %s: %w", gwIntf, err) - } - } - - if !isGWAcclInterface { // We do not have an accelerated device for Gateway interface - res.macAddress, err = util.GetOVSPortMACAddress(gwIntf) - if err != nil { - return nil, fmt.Errorf("failed to get MAC address for ovs port %s: %w", gwIntf, err) - } - } - - res.interfaceID, err = bridgedGatewayNodeSetup(nodeName, res.bridgeName, physicalNetworkName) - if err != nil { - return nil, fmt.Errorf("failed to set up shared interface gateway: %v", err) - } - - // the name of the patch port created by ovn-controller is of the form - // patch--to-br-int - defaultNetConfig.patchPort = (&util.DefaultNetInfo{}).GetNetworkScopedPatchPortName(res.bridgeName, nodeName) - - // for DPU we use the host MAC address for the Gateway configuration - if config.OvnKubeNode.Mode == types.NodeModeDPU { - hostRep, err := util.GetDPUHostInterface(res.bridgeName) - 
if err != nil { - return nil, err - } - res.macAddress, err = util.GetSriovnetOps().GetRepresentorPeerMacAddress(hostRep) - if err != nil { - return nil, err - } - } - return &res, nil -} - -func getRepresentor(intfName string) (string, error) { - deviceID, err := util.GetDeviceIDFromNetdevice(intfName) - if err != nil { - return "", err + if config.Gateway.Mode != config.GatewayModeLocal { + return delLocalGatewayPodSubnetNFTRules() } - return util.GetFunctionRepresentorName(deviceID) + return addOrUpdateLocalGatewayPodSubnetNFTRules(g.GetDefaultPodNetworkAdvertised(), subnets...) } diff --git a/go-controller/pkg/node/gateway_init.go b/go-controller/pkg/node/gateway_init.go index c7553f7d0d..b4d11d69cf 100644 --- a/go-controller/pkg/node/gateway_init.go +++ b/go-controller/pkg/node/gateway_init.go @@ -9,6 +9,7 @@ import ( "github.com/vishvananda/netlink" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" @@ -18,94 +19,11 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) -// bridgedGatewayNodeSetup enables forwarding on bridge interface, sets up the physical network name mappings for the bridge, -// and returns an ifaceID created from the bridge name and the node name -func bridgedGatewayNodeSetup(nodeName, bridgeName, physicalNetworkName string) (string, error) { - // IPv6 forwarding is enabled globally - if config.IPv4Mode { - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.forwarding=1", bridgeName)) - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", bridgeName) { - return "", fmt.Errorf("could not set the 
correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", - bridgeName, stdout, stderr, err) - } - } - - // ovn-bridge-mappings maps a physical network name to a local ovs bridge - // that provides connectivity to that network. It is in the form of physnet1:br1,physnet2:br2. - // Note that there may be multiple ovs bridge mappings, be sure not to override - // the mappings for the other physical network - stdout, stderr, err := util.RunOVSVsctl("--if-exists", "get", "Open_vSwitch", ".", - "external_ids:ovn-bridge-mappings") - if err != nil { - return "", fmt.Errorf("failed to get ovn-bridge-mappings stderr:%s (%v)", stderr, err) - } - // skip the existing mapping setting for the specified physicalNetworkName - mapString := "" - bridgeMappings := strings.Split(stdout, ",") - for _, bridgeMapping := range bridgeMappings { - m := strings.Split(bridgeMapping, ":") - if network := m[0]; network != physicalNetworkName { - if len(mapString) != 0 { - mapString += "," - } - mapString += bridgeMapping - } - } - if len(mapString) != 0 { - mapString += "," - } - mapString += physicalNetworkName + ":" + bridgeName - - _, stderr, err = util.RunOVSVsctl("set", "Open_vSwitch", ".", - fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", mapString)) - if err != nil { - return "", fmt.Errorf("failed to set ovn-bridge-mappings for ovs bridge %s"+ - ", stderr:%s (%v)", bridgeName, stderr, err) - } - - ifaceID := bridgeName + "_" + nodeName - return ifaceID, nil -} - -// getNetworkInterfaceIPAddresses returns the IP addresses for the network interface 'iface'. 
-func getNetworkInterfaceIPAddresses(iface string) ([]*net.IPNet, error) { - allIPs, err := util.GetFilteredInterfaceV4V6IPs(iface) - if err != nil { - return nil, fmt.Errorf("could not find IP addresses: %v", err) - } - - var ips []*net.IPNet - var foundIPv4 bool - var foundIPv6 bool - for _, ip := range allIPs { - if utilnet.IsIPv6CIDR(ip) { - if config.IPv6Mode && !foundIPv6 { - // For IPv6 addresses with 128 prefix, let's try to find an appropriate subnet - // in the routing table - subnetIP, err := util.GetIPv6OnSubnet(iface, ip) - if err != nil { - return nil, fmt.Errorf("could not find IPv6 address on subnet: %v", err) - } - ips = append(ips, subnetIP) - foundIPv6 = true - } - } else if config.IPv4Mode && !foundIPv4 { - ips = append(ips, ip) - foundIPv4 = true - } - } - if config.IPv4Mode && !foundIPv4 { - return nil, fmt.Errorf("failed to find IPv4 address on interface %s", iface) - } else if config.IPv6Mode && !foundIPv6 { - return nil, fmt.Errorf("failed to find IPv6 address on interface %s", iface) - } - return ips, nil -} - func getGatewayNextHops() ([]net.IP, string, error) { var gatewayNextHops []net.IP var needIPv4NextHop bool @@ -216,52 +134,6 @@ func getGatewayNextHops() ([]net.IP, string, error) { return gatewayNextHops, gatewayIntf, nil } -// getDPUHostPrimaryIPAddresses returns the DPU host IP/Network based on K8s Node IP -// and DPU IP subnet overriden by config config.Gateway.RouterSubnet -func getDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*net.IPNet, error) { - // Note(adrianc): No Dual-Stack support at this point as we rely on k8s node IP to derive gateway information - // for each node. 
- var gwIps []*net.IPNet - isIPv4 := utilnet.IsIPv4(k8sNodeIP) - - // override subnet mask via config - if config.Gateway.RouterSubnet != "" { - _, addr, err := net.ParseCIDR(config.Gateway.RouterSubnet) - if err != nil { - return nil, err - } - if utilnet.IsIPv4CIDR(addr) != isIPv4 { - return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ - "does not match Node IP address format", config.Gateway.RouterSubnet) - } - if !addr.Contains(k8sNodeIP) { - return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ - "subnet does not contain Node IP address (%s)", config.Gateway.RouterSubnet, k8sNodeIP) - } - addr.IP = k8sNodeIP - gwIps = append(gwIps, addr) - } else { - // Assume Host and DPU share the same subnet - // in this case just update the matching IPNet with the Host's IP address - for _, addr := range ifAddrs { - if utilnet.IsIPv4CIDR(addr) != isIPv4 { - continue - } - // expect k8s Node IP to be contained in the given subnet - if !addr.Contains(k8sNodeIP) { - continue - } - newAddr := *addr - newAddr.IP = k8sNodeIP - gwIps = append(gwIps, &newAddr) - } - if len(gwIps) == 0 { - return nil, fmt.Errorf("could not find subnet on DPU matching node IP %s", k8sNodeIP) - } - } - return gwIps, nil -} - // getInterfaceByIP retrieves Interface that has `ip` assigned to it func getInterfaceByIP(ip net.IP) (string, error) { links, err := util.GetNetLinkOps().LinkList() @@ -315,6 +187,39 @@ func configureSvcRouteViaInterface(routeManager *routemanager.Controller, iface return nil } +// getNodePrimaryIfAddrs returns the appropriate interface addresses based on the node mode +func getNodePrimaryIfAddrs(watchFactory factory.NodeWatchFactory, nodeName string, gatewayIntf string) ([]*net.IPNet, error) { + switch config.OvnKubeNode.Mode { + case types.NodeModeDPU: + // For DPU mode, use the host IP address from node annotation + node, err := watchFactory.GetNode(nodeName) + if err != nil { + return nil, fmt.Errorf("error retrieving node %s: %v", 
nodeName, err) + } + + // Extract the primary DPU address annotation from the node + nodeIfAddr, err := util.GetNodePrimaryDPUHostAddrAnnotation(node) + if err != nil { + return nil, err + } + + if nodeIfAddr.IPv4 == "" { + return nil, fmt.Errorf("node primary DPU address annotation is empty for node %s", nodeName) + } + + nodeIP, nodeAddrs, err := net.ParseCIDR(nodeIfAddr.IPv4) + if err != nil { + return nil, fmt.Errorf("failed to parse node IP address %s: %v", nodeIfAddr.IPv4, err) + } + + nodeAddrs.IP = nodeIP + return []*net.IPNet{nodeAddrs}, nil + default: + // For other modes, get network interface IP addresses directly + return nodeutil.GetNetworkInterfaceIPAddresses(gatewayIntf) + } +} + // initGatewayPreStart executes the first part of the gateway initialization for the node. // It creates the gateway object, the node IP manager, openflow manager and node port watcher // once OVN controller is ready and the patch port exists for this node. @@ -324,7 +229,6 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( subnets []*net.IPNet, nodeAnnotator kube.Annotator, mgmtPort managementport.Interface, - kubeNodeIP net.IP, ) (*gateway, error) { klog.Info("Initializing Gateway Functionality for Gateway PreStart") @@ -343,20 +247,12 @@ func (nc *DefaultNodeNetworkController) initGatewayPreStart( egressGWInterface = interfaceForEXGW(config.Gateway.EgressGWInterface) } - ifAddrs, err = getNetworkInterfaceIPAddresses(gatewayIntf) + // Get interface addresses based on node mode + ifAddrs, err = getNodePrimaryIfAddrs(nc.watchFactory, nc.name, gatewayIntf) if err != nil { return nil, err } - // For DPU need to use the host IP addr which currently is assumed to be K8s Node cluster - // internal IP address. 
- if config.OvnKubeNode.Mode == types.NodeModeDPU { - ifAddrs, err = getDPUHostPrimaryIPAddresses(kubeNodeIP, ifAddrs) - if err != nil { - return nil, err - } - } - if err := util.SetNodePrimaryIfAddrs(nodeAnnotator, ifAddrs); err != nil { klog.Errorf("Unable to set primary IP net label on node, err: %v", err) } @@ -472,7 +368,7 @@ func (nc *DefaultNodeNetworkController) initGatewayMainStart(gw *gateway, waiter // interfaceForEXGW takes the interface requested to act as exgw bridge // and returns the name of the bridge if exists, or the interface itself -// if the bridge needs to be created. In this last scenario, bridgeForInterface +// if the bridge needs to be created. In this last scenario, BridgeForInterface // will create the bridge. func interfaceForEXGW(intfName string) string { if _, _, err := util.RunOVSVsctl("br-exists", intfName); err == nil { @@ -488,7 +384,7 @@ func interfaceForEXGW(intfName string) string { return intfName } -func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) error { +func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP, nodeAnnotator kube.Annotator) error { // A DPU host gateway is complementary to the shared gateway running // on the DPU embedded CPU. 
it performs some initializations and // watch on services for iptable rule updates and run a loadBalancerHealth checker @@ -496,35 +392,71 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er klog.Info("Initializing Shared Gateway Functionality on DPU host") var err error - // Force gateway interface to be the interface associated with kubeNodeIP - gwIntf, err := getInterfaceByIP(kubeNodeIP) + // Find the network interface that has the Kubernetes node IP assigned to it + // This interface will be used for DPU host gateway operations + kubeIntf, err := getInterfaceByIP(kubeNodeIP) if err != nil { return err } - config.Gateway.Interface = gwIntf - _, gatewayIntf, err := getGatewayNextHops() + // Get all IP addresses (IPv4 and IPv6) configured on the detected interface + ifAddrs, err := nodeutil.GetNetworkInterfaceIPAddresses(kubeIntf) if err != nil { return err } - ifAddrs, err := getNetworkInterfaceIPAddresses(gatewayIntf) - if err != nil { + // Extract the IPv4 address from the interface addresses for node annotation + nodeIPNet, _ := util.MatchFirstIPNetFamily(false, ifAddrs) + nodeAddrSet := sets.New[string](nodeIPNet.String()) + + // If no gateway interface is explicitly configured, use the detected interface + if config.Gateway.Interface == "" { + config.Gateway.Interface = kubeIntf + } + + // If a different gateway interface is configured than the one with used for the kubernetes node IP, + // get its addresses and add them to the node address set for routing purposes + if config.Gateway.Interface != kubeIntf { + ifAddrs, err = nodeutil.GetNetworkInterfaceIPAddresses(config.Gateway.Interface) + if err != nil { + return err + } + detectedIPNetv4, _ := util.MatchFirstIPNetFamily(false, ifAddrs) + nodeAddrSet.Insert(detectedIPNetv4.String()) + // Use the configured interface for the masquerade route instead of the auto-detected one + kubeIntf = config.Gateway.Interface + } + + // Set the primary DPU address annotation on the node 
with the interface addresses + if err := util.SetNodePrimaryDPUHostAddr(nodeAnnotator, ifAddrs); err != nil { + klog.Errorf("Unable to set primary IP net label on node, err: %v", err) + return err + } + + // Set the host CIDRs annotation to include all detected network addresses + // This helps with routing decisions for traffic coming from the host + if err := util.SetNodeHostCIDRs(nodeAnnotator, nodeAddrSet); err != nil { + klog.Errorf("Unable to set host-cidrs on node, err: %v", err) return err } + // Apply all node annotations to the Kubernetes node object + if err := nodeAnnotator.Run(); err != nil { + return fmt.Errorf("failed to set node %s annotations: %w", nc.name, err) + } + // Delete stale masquerade resources if there are any. This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. - if err := deleteStaleMasqueradeResources(gwIntf, nc.name, nc.watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(kubeIntf, nc.name, nc.watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(gwIntf); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwIntf, err) + if err := setNodeMasqueradeIPOnExtBridge(kubeIntf); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", kubeIntf, err) } - if err := addMasqueradeRoute(nc.routeManager, gwIntf, nc.name, ifAddrs, nc.watchFactory); err != nil { + if err := addMasqueradeRoute(nc.routeManager, kubeIntf, nc.name, ifAddrs, nc.watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -533,7 +465,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er return fmt.Errorf("failed to update masquerade subnet annotation on node: %s, error: %v", nc.name, err) } - 
err = configureSvcRouteViaInterface(nc.routeManager, gatewayIntf, DummyNextHopIPs()) + err = configureSvcRouteViaInterface(nc.routeManager, config.Gateway.Interface, DummyNextHopIPs()) if err != nil { return err } @@ -559,7 +491,7 @@ func (nc *DefaultNodeNetworkController) initGatewayDPUHost(kubeNodeIP net.IP) er gw.portClaimWatcher = portClaimWatcher } - if err := addHostMACBindings(gwIntf); err != nil { + if err := addHostMACBindings(kubeIntf); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing") } @@ -603,7 +535,7 @@ func CleanupClusterNode(name string) error { func (nc *DefaultNodeNetworkController) updateGatewayMAC(link netlink.Link) error { // TBD-merge for dpu-host mode: if interface mac of the dpu-host interface that connects to the // gateway bridge on the dpu changes, we need to update dpu's gatewayBridge.macAddress L3 gateway - // annotation (see bridgeForInterface) + // annotation (see BridgeForInterface) if config.OvnKubeNode.Mode != types.NodeModeFull { return nil } diff --git a/go-controller/pkg/node/gateway_init_linux_test.go b/go-controller/pkg/node/gateway_init_linux_test.go index ddde471afa..d3b380d008 100644 --- a/go-controller/pkg/node/gateway_init_linux_test.go +++ b/go-controller/pkg/node/gateway_init_linux_test.go @@ -56,11 +56,14 @@ add table inet ovn-kubernetes add set inet ovn-kubernetes mgmtport-no-snat-nodeports { type inet_proto . inet_service ; comment "NodePorts not subject to management port SNAT" ; } add set inet ovn-kubernetes mgmtport-no-snat-services-v4 { type ipv4_addr . inet_proto . inet_service ; comment "eTP:Local short-circuit not subject to management port SNAT (IPv4)" ; } add set inet ovn-kubernetes mgmtport-no-snat-services-v6 { type ipv6_addr . inet_proto . 
inet_service ; comment "eTP:Local short-circuit not subject to management port SNAT (IPv6)" ; } +add set inet ovn-kubernetes mgmtport-no-snat-subnets-v4 { type ipv4_addr ; flags interval ; comment "subnets not subject to management port SNAT (IPv4)" ; } +add set inet ovn-kubernetes mgmtport-no-snat-subnets-v6 { type ipv6_addr ; flags interval ; comment "subnets not subject to management port SNAT (IPv6)" ; } add chain inet ovn-kubernetes mgmtport-snat { type nat hook postrouting priority 100 ; comment "OVN SNAT to Management Port" ; } add rule inet ovn-kubernetes mgmtport-snat oifname != %s return add rule inet ovn-kubernetes mgmtport-snat meta nfproto ipv4 ip saddr 10.1.1.2 counter return add rule inet ovn-kubernetes mgmtport-snat meta l4proto . th dport @mgmtport-no-snat-nodeports counter return add rule inet ovn-kubernetes mgmtport-snat ip daddr . meta l4proto . th dport @mgmtport-no-snat-services-v4 counter return +add rule inet ovn-kubernetes mgmtport-snat ip saddr @mgmtport-no-snat-subnets-v4 counter return add rule inet ovn-kubernetes mgmtport-snat counter snat ip to 10.1.1.2 ` @@ -77,6 +80,17 @@ add chain inet ovn-kubernetes udn-service-prerouting { type filter hook prerouti add rule inet ovn-kubernetes udn-service-prerouting iifname != %s jump udn-service-mark add chain inet ovn-kubernetes udn-service-output { type filter hook output priority -150 ; comment "UDN services packet mark - Output" ; } add rule inet ovn-kubernetes udn-service-output jump udn-service-mark +add chain inet ovn-kubernetes ovn-kube-udn-masq { comment "OVN UDN masquerade" ; } +add rule inet ovn-kubernetes ovn-kube-udn-masq ip saddr != 169.254.169.0/29 ip daddr != 172.16.1.0/24 ip saddr 169.254.169.0/24 masquerade +add rule inet ovn-kubernetes ovn-kube-local-gw-masq jump ovn-kube-udn-masq +` + +const baseLGWNFTablesRules = ` +add rule inet ovn-kubernetes ovn-kube-local-gw-masq ip saddr 169.254.169.1 masquerade +add chain inet ovn-kubernetes ovn-kube-local-gw-masq { type nat hook 
postrouting priority 101 ; comment "OVN local gateway masquerade" ; } +add rule inet ovn-kubernetes ovn-kube-local-gw-masq jump ovn-kube-pod-subnet-masq +add rule inet ovn-kubernetes ovn-kube-pod-subnet-masq ip saddr 10.1.1.0/24 masquerade +add chain inet ovn-kubernetes ovn-kube-pod-subnet-masq ` func getBaseNFTRules(mgmtPort string) string { @@ -87,6 +101,10 @@ func getBaseNFTRules(mgmtPort string) string { return ret } +func getBaseLGWNFTablesRules(mgmtPort string) string { + return getBaseNFTRules(mgmtPort) + baseLGWNFTablesRules +} + func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, eth0Name, eth0MAC, eth0GWIP, eth0CIDR string, gatewayVLANID uint, l netlink.Link, hwOffload, setNodeIP bool) { const mtu string = "1234" @@ -163,7 +181,7 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.breth0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/breth0/forwarding=1", Output: "net.ipv4.conf.breth0.forwarding = 1", }) } @@ -192,6 +210,9 @@ func shareGatewayInterfaceTest(app *cli.App, testNS ns.NetNS, Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . 
other_config:hw-offload", Output: fmt.Sprintf("%t", hwOffload), }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-appctl --timeout=15 fdb/add breth0 breth0 0 " + eth0MAC, + }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 get Interface patch-breth0_node1-to-br-int ofport", Output: "5", @@ -566,7 +587,7 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, // exec Mocks fexec := ovntest.NewLooseCompareFakeExec() // gatewayInitInternal - // bridgeForInterface + // BridgeForInterface fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 port-to-br " + brphys, Err: fmt.Errorf(""), @@ -600,7 +621,7 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.brp0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/brp0/forwarding=1", Output: "net.ipv4.conf.brp0.forwarding = 1", }) } @@ -638,6 +659,9 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . 
other_config:hw-offload", Output: "false", }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + fmt.Sprintf("ovs-appctl --timeout=15 fdb/add %s %s 0 %s", brphys, brphys, hostMAC), + }) // GetDPUHostInterface fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 list-ports " + brphys, @@ -724,6 +748,9 @@ func shareGatewayInterfaceDPUTest(app *cli.App, testNS ns.NetNS, k := &kube.Kube{KClient: kubeFakeClient} nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name) + err = util.SetNodePrimaryDPUHostAddr(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet)) + config.Gateway.RouterSubnet = nodeSubnet + Expect(err).NotTo(HaveOccurred()) err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNets(nodeSubnet)) Expect(err).NotTo(HaveOccurred()) @@ -892,8 +919,11 @@ func shareGatewayInterfaceDPUHostTest(app *cli.App, testNS ns.NetNS, uplinkName, err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() + k := &kube.Kube{KClient: kubeFakeClient} - err := nc.initGatewayDPUHost(net.ParseIP(hostIP)) + nodeAnnotator := kube.NewNodeAnnotator(k, existingNode.Name) + + err := nc.initGatewayDPUHost(net.ParseIP(hostIP), nodeAnnotator) Expect(err).NotTo(HaveOccurred()) link, err := netlink.LinkByName(uplinkName) @@ -1062,7 +1092,7 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.breth0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/breth0/forwarding=1", Output: "net.ipv4.conf.breth0.forwarding = 1", }) } @@ -1091,6 +1121,9 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . 
other_config:hw-offload", Output: "false", }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-appctl --timeout=15 fdb/add breth0 breth0 0 " + eth0MAC, + }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 get Interface patch-breth0_node1-to-br-int ofport", Output: "5", @@ -1340,10 +1373,6 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` "OVN-KUBE-EXTERNALIP": []string{ fmt.Sprintf("-p %s -d %s --dport %v -j DNAT --to-destination %s:%v", service.Spec.Ports[0].Protocol, externalIP, service.Spec.Ports[0].Port, service.Spec.ClusterIP, service.Spec.Ports[0].Port), }, - "POSTROUTING": []string{ - "-s 169.254.169.1 -j MASQUERADE", - "-s 10.1.1.0/24 -j MASQUERADE", - }, "OVN-KUBE-ETP": []string{}, "OVN-KUBE-ITP": []string{}, }, @@ -1369,24 +1398,6 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` "OVN-KUBE-ITP": []string{}, }, } - // OCP HACK: Block MCS Access. https://github.com/openshift/ovn-kubernetes/pull/170 - expectedMCSRules := []string{ - "-p tcp -m tcp --dport 22624 --syn -j REJECT", - "-p tcp -m tcp --dport 22623 --syn -j REJECT", - } - expectedTables["filter"]["FORWARD"] = append(expectedMCSRules, expectedTables["filter"]["FORWARD"]...) - expectedTables["filter"]["OUTPUT"] = append(expectedMCSRules, expectedTables["filter"]["OUTPUT"]...) 
- // END OCP HACK - if util.IsNetworkSegmentationSupportEnabled() { - expectedTables["nat"]["POSTROUTING"] = append(expectedTables["nat"]["POSTROUTING"], - "-j OVN-KUBE-UDN-MASQUERADE", - ) - expectedTables["nat"]["OVN-KUBE-UDN-MASQUERADE"] = append(expectedTables["nat"]["OVN-KUBE-UDN-MASQUERADE"], - "-s 169.254.169.0/29 -j RETURN", // this guarantees we don't SNAT default network masqueradeIPs - "-d 172.16.1.0/24 -j RETURN", // this guarantees we don't SNAT service traffic - "-s 169.254.169.0/24 -j MASQUERADE", // this guarantees we SNAT all UDN MasqueradeIPs traffic leaving the node - ) - } f4 := iptV4.(*util.FakeIPTables) err = f4.MatchState(expectedTables, map[util.FakePolicyKey]string{{ Table: "filter", @@ -1403,7 +1414,7 @@ OFPT_GET_CONFIG_REPLY (xid=0x4): frags=normal miss_send_len=0` err = f6.MatchState(expectedTables, nil) Expect(err).NotTo(HaveOccurred()) - expectedNFT := getBaseNFTRules(types.K8sMgmtIntfName) + expectedNFT := getBaseLGWNFTablesRules(types.K8sMgmtIntfName) err = nodenft.MatchNFTRules(expectedNFT, nft.Dump()) Expect(err).NotTo(HaveOccurred()) @@ -1659,47 +1670,6 @@ var _ = Describe("Gateway unit tests", func() { util.SetNetLinkOpMockInst(origNetlinkInst) }) - Context("getDPUHostPrimaryIPAddresses", func() { - - It("returns Gateway IP/Subnet for kubernetes node IP", func() { - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.0.11") - expectedGwSubnet := []*net.IPNet{ - {IP: nodeIP, Mask: net.CIDRMask(24, 32)}, - } - gwSubnet, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).ToNot(HaveOccurred()) - Expect(gwSubnet).To(Equal(expectedGwSubnet)) - }) - - It("Fails if node IP is not in host subnets", func() { - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.1.11") - _, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).To(HaveOccurred()) - }) - - It("returns node IP with config.Gateway.RouterSubnet subnet", func() { - 
config.Gateway.RouterSubnet = "10.1.0.0/16" - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.1.0.11") - expectedGwSubnet := []*net.IPNet{ - {IP: nodeIP, Mask: net.CIDRMask(16, 32)}, - } - gwSubnet, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).ToNot(HaveOccurred()) - Expect(gwSubnet).To(Equal(expectedGwSubnet)) - }) - - It("Fails if node IP is not in config.Gateway.RouterSubnet subnet", func() { - config.Gateway.RouterSubnet = "10.1.0.0/16" - _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") - nodeIP := net.ParseIP("10.0.0.11") - _, err := getDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) - Expect(err).To(HaveOccurred()) - }) - }) - Context("getInterfaceByIP", func() { It("Finds correct interface", func() { lnk := &linkMock.Link{} diff --git a/go-controller/pkg/node/gateway_iptables.go b/go-controller/pkg/node/gateway_iptables.go index e9b6b12387..90bffbe91f 100644 --- a/go-controller/pkg/node/gateway_iptables.go +++ b/go-controller/pkg/node/gateway_iptables.go @@ -21,11 +21,10 @@ import ( ) const ( - iptableNodePortChain = "OVN-KUBE-NODEPORT" // called from nat-PREROUTING and nat-OUTPUT - iptableExternalIPChain = "OVN-KUBE-EXTERNALIP" // called from nat-PREROUTING and nat-OUTPUT - iptableETPChain = "OVN-KUBE-ETP" // called from nat-PREROUTING only - iptableITPChain = "OVN-KUBE-ITP" // called from mangle-OUTPUT and nat-OUTPUT - iptableUDNMasqueradeChain = "OVN-KUBE-UDN-MASQUERADE" // called from nat-POSTROUTING + iptableNodePortChain = "OVN-KUBE-NODEPORT" // called from nat-PREROUTING and nat-OUTPUT + iptableExternalIPChain = "OVN-KUBE-EXTERNALIP" // called from nat-PREROUTING and nat-OUTPUT + iptableETPChain = "OVN-KUBE-ETP" // called from nat-PREROUTING only + iptableITPChain = "OVN-KUBE-ITP" // called from mangle-OUTPUT and nat-OUTPUT ) func clusterIPTablesProtocols() []iptables.Protocol { @@ -69,29 +68,11 @@ func restoreIptRulesFiltered(rules []nodeipt.Rule, filter 
map[string]map[string] return nodeipt.RestoreRulesFiltered(rules, filter) } -// appendIptRules adds the provided rules in an append fashion -// i.e each rule gets added at the last position in the chain -func appendIptRules(rules []nodeipt.Rule) error { - return nodeipt.AddRules(rules, true) -} - // deleteIptRules removes provided rules from the chain func deleteIptRules(rules []nodeipt.Rule) error { return nodeipt.DelRules(rules) } -// ensureChain ensures that a chain exists within a table -func ensureChain(table, chain string) error { - for _, proto := range clusterIPTablesProtocols() { - ipt, err := util.GetIPTablesHelper(proto) - if err != nil { - return fmt.Errorf("failed to get IPTables helper to add UDN chain: %v", err) - } - addChaintoTable(ipt, table, chain) - } - return nil -} - func getGatewayInitRules(chain string, proto iptables.Protocol) []nodeipt.Rule { iptRules := []nodeipt.Rule{} if chain == iptableITPChain { @@ -403,123 +384,8 @@ func getLocalGatewayFilterRules(ifname string, cidr *net.IPNet) []nodeipt.Rule { } } -func getLocalGatewayPodSubnetNATRules(cidr *net.IPNet) []nodeipt.Rule { - protocol := getIPTablesProtocol(cidr.IP.String()) - return []nodeipt.Rule{ - { - Table: "nat", - Chain: "POSTROUTING", - Args: []string{ - "-s", cidr.String(), - "-j", "MASQUERADE", - }, - Protocol: protocol, - }, - } -} - -// getUDNMasqueradeRules is only called for local-gateway-mode -func getUDNMasqueradeRules(protocol iptables.Protocol) []nodeipt.Rule { - // the following rules are actively used only for the UDN Feature: - // -A POSTROUTING -j OVN-KUBE-UDN-MASQUERADE - // -A OVN-KUBE-UDN-MASQUERADE -s 169.254.0.0/29 -j RETURN - // -A OVN-KUBE-UDN-MASQUERADE -d 10.96.0.0/16 -j RETURN - // -A OVN-KUBE-UDN-MASQUERADE -s 169.254.0.0/17 -j MASQUERADE - // NOTE: Ordering is important here, the RETURN must come before - // the MASQUERADE rule. Please don't change the ordering. 
- srcUDNMasqueradePrefix := config.Gateway.V4MasqueradeSubnet - ipFamily := utilnet.IPv4 - if protocol == iptables.ProtocolIPv6 { - srcUDNMasqueradePrefix = config.Gateway.V6MasqueradeSubnet - ipFamily = utilnet.IPv6 - } - // defaultNetworkReservedMasqueradePrefix contains the first 6 IPs in the - // masquerade range that shouldn't be masqueraded. Hence it's always 3 bits (8 - // IPs) wide, regardless of IP family. - _, ipnet, _ := net.ParseCIDR(srcUDNMasqueradePrefix) - _, len := ipnet.Mask.Size() - defaultNetworkReservedMasqueradePrefix := fmt.Sprintf("%s/%d", ipnet.IP.String(), len-3) - - rules := []nodeipt.Rule{ - { - Table: "nat", - Chain: "POSTROUTING", - Args: []string{"-j", iptableUDNMasqueradeChain}, // NOTE: AddRules will take care of creating the chain - Protocol: protocol, - }, - { - Table: "nat", - Chain: iptableUDNMasqueradeChain, - Args: []string{ - "-s", defaultNetworkReservedMasqueradePrefix, - "-j", "RETURN", - }, - Protocol: protocol, - }, - } - for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { - if utilnet.IPFamilyOfCIDR(svcCIDR) != ipFamily { - continue - } - rules = append(rules, - nodeipt.Rule{ - Table: "nat", - Chain: iptableUDNMasqueradeChain, - Args: []string{ - "-d", svcCIDR.String(), - "-j", "RETURN", - }, - Protocol: protocol, - }, - ) - } - rules = append(rules, - nodeipt.Rule{ - Table: "nat", - Chain: iptableUDNMasqueradeChain, - Args: []string{ - "-s", srcUDNMasqueradePrefix, - "-j", "MASQUERADE", - }, - Protocol: protocol, - }, - ) - return rules -} - -func getLocalGatewayNATRules(cidr *net.IPNet) []nodeipt.Rule { - // Allow packets to/from the gateway interface in case defaults deny - protocol := getIPTablesProtocol(cidr.IP.String()) - masqueradeIP := config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP - if protocol == iptables.ProtocolIPv6 { - masqueradeIP = config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP - } - rules := append( - []nodeipt.Rule{ - { - Table: "nat", - Chain: "POSTROUTING", - Args: []string{ - "-s", 
masqueradeIP.String(), - "-j", "MASQUERADE", - }, - Protocol: protocol, - }, - }, - getLocalGatewayPodSubnetNATRules(cidr)..., - ) - - // FIXME(tssurya): If the feature is disabled we should be removing - // these rules - if util.IsNetworkSegmentationSupportEnabled() { - rules = append(rules, getUDNMasqueradeRules(protocol)...) - } - - return rules -} - -// initLocalGatewayNATRules sets up iptables rules for interfaces -func initLocalGatewayNATRules(ifname string, cidr *net.IPNet) error { +// initLocalGatewayIPTFilterRules sets up iptables rules for interfaces +func initLocalGatewayIPTFilterRules(ifname string, cidr *net.IPNet) error { // Insert the filter table rules because they need to be evaluated BEFORE the DROP rules // we have for forwarding. DO NOT change the ordering; specially important // during SGW->LGW rollouts and restarts. @@ -527,25 +393,8 @@ func initLocalGatewayNATRules(ifname string, cidr *net.IPNet) error { if err != nil { return fmt.Errorf("unable to insert forwarding rules %v", err) } - // append the masquerade rules in POSTROUTING table since that needs to be - // evaluated last. - return appendIptRules(getLocalGatewayNATRules(cidr)) -} - -func addLocalGatewayPodSubnetNATRules(cidrs ...*net.IPNet) error { - var rules []nodeipt.Rule - for _, cidr := range cidrs { - rules = append(rules, getLocalGatewayPodSubnetNATRules(cidr)...) - } - return appendIptRules(rules) -} - -func delLocalGatewayPodSubnetNATRules(cidrs ...*net.IPNet) error { - var rules []nodeipt.Rule - for _, cidr := range cidrs { - rules = append(rules, getLocalGatewayPodSubnetNATRules(cidr)...) 
- } - return deleteIptRules(rules) + // NOTE: nftables masquerade rules are now handled separately in initLocalGatewayNFTNATRules + return nil } func addChaintoTable(ipt util.IPTablesHelper, tableName, chain string) { diff --git a/go-controller/pkg/node/gateway_localnet.go b/go-controller/pkg/node/gateway_localnet.go index e0cc822844..6b8ed9aa0b 100644 --- a/go-controller/pkg/node/gateway_localnet.go +++ b/go-controller/pkg/node/gateway_localnet.go @@ -17,11 +17,11 @@ import ( func initLocalGateway(hostSubnets []*net.IPNet, mgmtPort managementport.Interface) error { klog.Info("Adding iptables masquerading rules for new local gateway") - if util.IsNetworkSegmentationSupportEnabled() { - if err := ensureChain("nat", iptableUDNMasqueradeChain); err != nil { - return fmt.Errorf("failed to ensure chain %s in NAT table: %w", iptableUDNMasqueradeChain, err) - } - } + + var allCIDRs []*net.IPNet + ifName := mgmtPort.GetInterfaceName() + + // First pass: collect all CIDRs and setup iptables filter rules per interface for _, hostSubnet := range hostSubnets { // local gateway mode uses mp0 as default path for all ingress traffic into OVN nextHop, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(hostSubnet), mgmtPort.GetAddresses()) @@ -32,11 +32,21 @@ func initLocalGateway(hostSubnets []*net.IPNet, mgmtPort managementport.Interfac // add iptables masquerading for mp0 to exit the host for egress cidr := nextHop.IP.Mask(nextHop.Mask) cidrNet := &net.IPNet{IP: cidr, Mask: nextHop.Mask} - ifName := mgmtPort.GetInterfaceName() - if err := initLocalGatewayNATRules(ifName, cidrNet); err != nil { + allCIDRs = append(allCIDRs, cidrNet) + + // Setup iptables filter rules for this interface/CIDR + if err := initLocalGatewayIPTFilterRules(ifName, cidrNet); err != nil { return fmt.Errorf("failed to add local NAT rules for: %s, err: %v", ifName, err) } } + + // setup nftables masquerade rules for all CIDRs (v4, v6 or dualstack) + if len(allCIDRs) > 0 { + if err := 
initLocalGatewayNFTNATRules(allCIDRs...); err != nil { + return fmt.Errorf("failed to setup nftables masquerade rules: %w", err) + } + } + return nil } diff --git a/go-controller/pkg/node/gateway_localnet_linux_test.go b/go-controller/pkg/node/gateway_localnet_linux_test.go index 013234e1b1..49e4d1ee13 100644 --- a/go-controller/pkg/node/gateway_localnet_linux_test.go +++ b/go-controller/pkg/node/gateway_localnet_linux_test.go @@ -21,6 +21,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" @@ -56,9 +57,8 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gwMACParsed, _ := net.ParseMAC(gwMAC) - defaultNetConfig := &bridgeUDNConfiguration{ - ofPortPatch: "patch-breth0_ov", - } + defaultBridge := bridgeconfig.TestDefaultBridgeConfig() + defaultBridge.SetMAC(gwMACParsed) fNPW := nodePortWatcher{ ofportPhys: "eth0", @@ -66,15 +66,11 @@ func initFakeNodePortWatcher(iptV4, iptV6 util.IPTablesHelper) *nodePortWatcher gatewayIPv6: v6localnetGatewayIP, serviceInfo: make(map[k8stypes.NamespacedName]*serviceConfig), ofm: &openflowManager{ - flowCache: map[string][]string{}, - defaultBridge: &bridgeConfiguration{ - macAddress: gwMACParsed, - netConfig: map[string]*bridgeUDNConfiguration{ - types.DefaultNetworkName: defaultNetConfig, - }, - }, + flowCache: map[string][]string{}, + defaultBridge: defaultBridge, }, networkManager: networkmanager.Default().Interface(), + gwBridge: bridgeconfig.TestBridgeConfig(""), } return &fNPW } diff --git a/go-controller/pkg/node/gateway_nftables.go 
b/go-controller/pkg/node/gateway_nftables.go index 6e341466ab..b38f2baebb 100644 --- a/go-controller/pkg/node/gateway_nftables.go +++ b/go-controller/pkg/node/gateway_nftables.go @@ -6,12 +6,15 @@ package node import ( "context" "fmt" + "net" "strings" corev1 "k8s.io/api/core/v1" utilnet "k8s.io/utils/net" "sigs.k8s.io/knftables" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -26,6 +29,13 @@ import ( // use an "accept" rule to override a later "drop" rule), then those rules will need to // either both be iptables or both be nftables. +// nftables chain names +const ( + nftablesLocalGatewayMasqChain = "ovn-kube-local-gw-masq" + nftablesPodSubnetMasqChain = "ovn-kube-pod-subnet-masq" + nftablesUDNMasqChain = "ovn-kube-udn-masq" +) + // getNoSNATNodePortRules returns elements to add to the "mgmtport-no-snat-nodeports" // set to prevent SNAT of sourceIP when passing through the management port, for an // `externalTrafficPolicy: Local` service with NodePorts. @@ -68,10 +78,10 @@ func getNoSNATLoadBalancerIPRules(svcPort corev1.ServicePort, localEndpoints []s // getUDNNodePortMarkNFTRule returns a verdict map element (nftablesUDNMarkNodePortsMap) // with a key composed of the svcPort protocol and port. // The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. 
-func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeUDNConfiguration) *knftables.Element { +func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeconfig.BridgeUDNConfiguration) *knftables.Element { var val []string if netInfo != nil { - val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.PktMark))} } return &knftables.Element{ Map: nftablesUDNMarkNodePortsMap, @@ -84,12 +94,12 @@ func getUDNNodePortMarkNFTRule(svcPort corev1.ServicePort, netInfo *bridgeUDNCon // getUDNExternalIPsMarkNFTRules returns a verdict map elements (nftablesUDNMarkExternalIPsV4Map or nftablesUDNMarkExternalIPsV6Map) // with a key composed of the external IP, svcPort protocol and port. // The value is a jump to the UDN chain mark if netInfo is provided, or nil that is useful for map entry removal. -func getUDNExternalIPsMarkNFTRules(svcPort corev1.ServicePort, externalIPs []string, netInfo *bridgeUDNConfiguration) []*knftables.Element { +func getUDNExternalIPsMarkNFTRules(svcPort corev1.ServicePort, externalIPs []string, netInfo *bridgeconfig.BridgeUDNConfiguration) []*knftables.Element { var nftRules []*knftables.Element var val []string if netInfo != nil { - val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.pktMark))} + val = []string{fmt.Sprintf("jump %s", GetUDNMarkChain(netInfo.PktMark))} } for _, externalIP := range externalIPs { mapName := nftablesUDNMarkExternalIPsV4Map @@ -175,7 +185,7 @@ func getGatewayNFTRules(service *corev1.Service, localEndpoints []string, svcHas // getUDNNFTRules generates nftables rules for a UDN service. // If netConfig is nil, the resulting map elements will have empty values, // suitable only for entry removal. 
-func getUDNNFTRules(service *corev1.Service, netConfig *bridgeUDNConfiguration) []*knftables.Element { +func getUDNNFTRules(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNConfiguration) []*knftables.Element { rules := make([]*knftables.Element, 0) for _, svcPort := range service.Spec.Ports { if util.ServiceTypeHasNodePort(service) { @@ -185,3 +195,320 @@ func getUDNNFTRules(service *corev1.Service, netConfig *bridgeUDNConfiguration) } return rules } + +// getLocalGatewayPodSubnetMasqueradeNFTRule creates a rule for masquerading traffic from the pod subnet CIDR +// in local gateway node in a seperate chain which is then called from local gateway masquerade chain. +// +// chain ovn-kube-pod-subnet-masq { +// ip saddr 10.244.0.0/24 masquerade +// ip6 saddr fd00:10:244:1::/64 masquerade +// } +// +// If isAdvertisedNetwork is true, masquerade only when destination matches remote node IPs. +// Rules look like: +// ip saddr 10.244.0.0/24 ip daddr @remote-node-ips-v4 masquerade +// ip6 saddr fd00:10:244:1::/64 ip6 daddr @remote-node-ips-v6 masquerade +func getLocalGatewayPodSubnetMasqueradeNFTRule(cidr *net.IPNet, isAdvertisedNetwork bool) (*knftables.Rule, error) { + // Create the rule for masquerading traffic from the CIDR + var ipPrefix string + var remoteNodeSetName string + if utilnet.IsIPv6CIDR(cidr) { + ipPrefix = "ip6" + remoteNodeSetName = types.NFTRemoteNodeIPsv6 + } else { + ipPrefix = "ip" + remoteNodeSetName = types.NFTRemoteNodeIPsv4 + } + + // If network is advertised, only masquerade if destination is a remote node IP + var optionalDestRules []string + if isAdvertisedNetwork { + optionalDestRules = []string{ipPrefix, "daddr", "@", remoteNodeSetName} + } + rule := &knftables.Rule{ + Rule: knftables.Concat( + ipPrefix, "saddr", cidr, + optionalDestRules, + "masquerade", + ), + Chain: nftablesPodSubnetMasqChain, + } + + return rule, nil +} + +// getLocalGatewayNATNFTRules returns the nftables rules for local gateway NAT including masquerade IP 
rule, +// pod subnet rules, and UDN masquerade rules (if network segmentation is enabled). +// This function supports dual-stack by accepting multiple CIDRs and generating rules for all IP families. +// +// chain ovn-kube-local-gw-masq { +// comment "OVN local gateway masquerade" +// type nat hook postrouting priority srcnat; policy accept; +// ip saddr 169.254.0.1 masquerade +// ip6 saddr fd69::1 masquerade +// jump ovn-kube-pod-subnet-masq +// jump ovn-kube-udn-masq +// } +func getLocalGatewayNATNFTRules(cidrs ...*net.IPNet) ([]*knftables.Rule, error) { + var rules []*knftables.Rule + + // Process each CIDR to support dual-stack + for _, cidr := range cidrs { + // Determine IP version and masquerade IP + isIPv6 := utilnet.IsIPv6CIDR(cidr) + var masqueradeIP net.IP + var ipPrefix string + if isIPv6 { + masqueradeIP = config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP + ipPrefix = "ip6" + } else { + masqueradeIP = config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP + ipPrefix = "ip" + } + + // Rule1: Masquerade IP rule for the main chain + masqRule := &knftables.Rule{ + Chain: nftablesLocalGatewayMasqChain, + Rule: knftables.Concat( + ipPrefix, "saddr", masqueradeIP, + "masquerade", + ), + } + rules = append(rules, masqRule) + + // Rule2: Pod subnet NAT rule for the pod subnet chain + podSubnetRule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr, false) + if err != nil { + return nil, fmt.Errorf("failed to create pod subnet masquerade rule: %w", err) + } + rules = append(rules, podSubnetRule) + } + + // Rule 3: UDN masquerade rules (if network segmentation is enabled) + if util.IsNetworkSegmentationSupportEnabled() { + if config.IPv4Mode { + udnRules, err := getUDNMasqueradeNFTRules(utilnet.IPv4) + if err != nil { + return nil, fmt.Errorf("failed to create IPv4 UDN masquerade rules: %w", err) + } + rules = append(rules, udnRules...) 
+ } + if config.IPv6Mode { + udnRules, err := getUDNMasqueradeNFTRules(utilnet.IPv6) + if err != nil { + return nil, fmt.Errorf("failed to create IPv6 UDN masquerade rules: %w", err) + } + rules = append(rules, udnRules...) + } + } + + return rules, nil +} + +// getUDNMasqueradeNFTRules returns the nftables rules for UDN masquerade. +// Chain creation is handled separately by setupLocalGatewayNATNFTRules. +// +// chain ovn-kube-udn-masq { +// comment "OVN UDN masquerade" +// ip saddr != 169.254.0.0/29 ip daddr != 10.96.0.0/16 ip saddr 169.254.0.0/17 masquerade +// ip6 saddr != fd69::/125 ip daddr != fd00:10:96::/112 ip6 saddr fd69::/112 masquerade +// } +func getUDNMasqueradeNFTRules(ipFamily utilnet.IPFamily) ([]*knftables.Rule, error) { + var rules []*knftables.Rule + + // Determine subnet and IP family + srcUDNMasqueradePrefix := config.Gateway.V4MasqueradeSubnet + ipPrefix := "ip" + if ipFamily == utilnet.IPv6 { + srcUDNMasqueradePrefix = config.Gateway.V6MasqueradeSubnet + ipPrefix = "ip6" + } + + // Calculate reserved masquerade prefix (first 8 IPs) + _, ipnet, err := net.ParseCIDR(srcUDNMasqueradePrefix) + if err != nil { + return nil, fmt.Errorf("failed to parse UDN masquerade subnet: %w", err) + } + _, prefixLen := ipnet.Mask.Size() + defaultNetworkReservedMasqueradePrefix := fmt.Sprintf("%s/%d", ipnet.IP.String(), prefixLen-3) + + // Rule: RETURN for reserved masquerade prefix and service CIDRs + // rest of the traffic is masqueraded + + for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { + if utilnet.IPFamilyOfCIDR(svcCIDR) != ipFamily { + continue + } + masqueradeRule := &knftables.Rule{ + Chain: nftablesUDNMasqChain, + Rule: knftables.Concat( + ipPrefix, "saddr", "!=", defaultNetworkReservedMasqueradePrefix, // this guarantees we don't SNAT default network masqueradeIPs + ipPrefix, "daddr", "!=", svcCIDR, // this guarantees we don't SNAT service traffic + ipPrefix, "saddr", srcUDNMasqueradePrefix, // this guarantees we SNAT all UDN MasqueradeIPs 
traffic leaving the node + "masquerade", + ), + } + rules = append(rules, masqueradeRule) + } + + return rules, nil +} + +// initLocalGatewayNFTNATRules sets up nftables rules for local gateway NAT functionality +// This function supports dual-stack by accepting multiple CIDRs and generating rules for all IP families +func initLocalGatewayNFTNATRules(cidrs ...*net.IPNet) error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %w", err) + } + + // Create transaction and apply all chains and rules + tx := nft.NewTransaction() + + // Create main local gateway masquerade chain + // Use priority 101 instead of defaultknftables.SNATPriority (100) to ensure + // iptables egress IP rules in OVN-KUBE-EGRESS-IP-MULTI-NIC chain run first + // this also ensure for egress-services, the + // chain egress-services { + // type nat hook postrouting priority srcnat; policy accept; + // is called before the local gateway masquerade chain + localGwMasqChain := &knftables.Chain{ + Name: nftablesLocalGatewayMasqChain, + Comment: knftables.PtrTo("OVN local gateway masquerade"), + Type: knftables.PtrTo(knftables.NATType), + Hook: knftables.PtrTo(knftables.PostroutingHook), + Priority: knftables.PtrTo(knftables.BaseChainPriority("101")), + } + tx.Add(localGwMasqChain) + + // Create dedicated pod subnet masquerade chain + podSubnetMasqChain := &knftables.Chain{ + Name: nftablesPodSubnetMasqChain, + } + tx.Add(podSubnetMasqChain) + + // Create UDN masquerade chain only if network segmentation is enabled + var udnMasqChain *knftables.Chain + if util.IsNetworkSegmentationSupportEnabled() { + udnMasqChain = &knftables.Chain{ + Name: nftablesUDNMasqChain, + Comment: knftables.PtrTo("OVN UDN masquerade"), + } + tx.Add(udnMasqChain) + } + + // Flush existing chains to ensure clean state + tx.Flush(localGwMasqChain) + tx.Flush(podSubnetMasqChain) + if util.IsNetworkSegmentationSupportEnabled() { + tx.Flush(udnMasqChain) + } + + // Get 
the existing local gateway NAT rules + localGwRules, err := getLocalGatewayNATNFTRules(cidrs...) + if err != nil { + return fmt.Errorf("failed to get local gateway NAT rules: %w", err) + } + + // Add the main local gateway NAT rules + for _, rule := range localGwRules { + tx.Add(rule) + } + + // Add jump rule from main chain to pod subnet chain + jumpToPodSubnetRule := &knftables.Rule{ + Chain: nftablesLocalGatewayMasqChain, + Rule: knftables.Concat( + "jump", nftablesPodSubnetMasqChain, + ), + } + tx.Add(jumpToPodSubnetRule) + + // Add jump rule to UDN chain only if network segmentation is enabled + if util.IsNetworkSegmentationSupportEnabled() { + jumpToUDNRule := &knftables.Rule{ + Chain: nftablesLocalGatewayMasqChain, + Rule: knftables.Concat( + "jump", nftablesUDNMasqChain, + ), + } + tx.Add(jumpToUDNRule) + } + + err = nft.Run(context.TODO(), tx) + if err != nil { + return fmt.Errorf("failed to setup local gateway NAT nftables rules: %w", err) + } + + return nil +} + +// addOrUpdateLocalGatewayPodSubnetNFTRules adds nftables rules for pod subnet masquerading for multiple CIDRs +// These rules are added to the dedicated pod subnet masquerade chain. +// If the rules already exist, they are updated. +// If isAdvertisedNetwork is true, the masquerade rules also get a destination match +// that matches the remote node IP set. 
+func addOrUpdateLocalGatewayPodSubnetNFTRules(isAdvertisedNetwork bool, cidrs ...*net.IPNet) error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %w", err) + } + + tx := nft.NewTransaction() + + // Ensure the pod subnet chain exists + podSubnetChain := &knftables.Chain{ + Name: nftablesPodSubnetMasqChain, + } + tx.Add(podSubnetChain) + + // Flush the chain to remove all existing rules + // if network toggles between advertised and non-advertised, we need to flush the chain and re-add correct rules + tx.Flush(podSubnetChain) + + // Add the new rules for each CIDR + for _, cidr := range cidrs { + rule, err := getLocalGatewayPodSubnetMasqueradeNFTRule(cidr, isAdvertisedNetwork) + if err != nil { + return fmt.Errorf("failed to create nftables rules for CIDR %s: %w", cidr.String(), err) + } + + // Add the rule + tx.Add(rule) + } + + if err := nft.Run(context.TODO(), tx); err != nil { + return fmt.Errorf("failed to add pod subnet NAT rules: %w", err) + } + + return nil +} + +// delLocalGatewayPodSubnetNFTRules removes nftables rules for pod subnet masquerading for multiple CIDRs +// Since we use a separate chain, we can simply flush it to remove all pod subnet rules. +func delLocalGatewayPodSubnetNFTRules() error { + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables helper: %w", err) + } + + tx := nft.NewTransaction() + + // In shared gateway mode, this chain might not exist if its + // not migration from local gateway mode. In that case, let's + // use the idiomatic way of adding the chain before trying to flush it. + // I anyways also have the knftables.IsNotFound() check in the caller later. 
+ tx.Add(&knftables.Chain{ + Name: nftablesPodSubnetMasqChain, + }) + + // Simply flush the dedicated pod subnet masquerade chain + // This removes all pod subnet masquerade rules at once + tx.Flush(&knftables.Chain{Name: nftablesPodSubnetMasqChain}) + + if err := nft.Run(context.TODO(), tx); err != nil && !knftables.IsNotFound(err) { + return fmt.Errorf("failed to delete pod subnet NAT rules: %w", err) + } + + return nil +} diff --git a/go-controller/pkg/node/gateway_shared_intf.go b/go-controller/pkg/node/gateway_shared_intf.go index bcfa83718b..bd83448ba4 100644 --- a/go-controller/pkg/node/gateway_shared_intf.go +++ b/go-controller/pkg/node/gateway_shared_intf.go @@ -26,36 +26,25 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/egressip" nodeipt "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iptables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/linkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/routemanager" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + nodeutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" ) const ( - // defaultOpenFlowCookie identifies default open flow rules added to the host OVS bridge. - // The hex number 0xdeff105, aka defflos, is meant to sound like default flows. 
- defaultOpenFlowCookie = "0xdeff105" // etpSvcOpenFlowCookie identifies constant open flow rules added to the host OVS // bridge to move packets between host and external for etp=local traffic. // The hex number 0xe745ecf105, represents etp(e74)-service(5ec)-flows which makes it easier for debugging. etpSvcOpenFlowCookie = "0xe745ecf105" - // pmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, - // fragmentation-needed (4) - pmtudOpenFlowCookie = "0x0304" - // ovsLocalPort is the name of the OVS bridge local port - ovsLocalPort = "LOCAL" - // ctMarkOVN is the conntrack mark value for OVN traffic - ctMarkOVN = "0x1" - // ctMarkHost is the conntrack mark value for host traffic - ctMarkHost = "0x2" - // ovnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for - // traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster. - ovnKubeNodeSNATMark = "0x3f0" // nftablesUDNServicePreroutingChain is a base chain registered into the prerouting hook, // and it contains one rule that jumps to nftablesUDNServiceMarkChain. @@ -92,10 +81,6 @@ const ( // to the appropriate network. 
nftablesUDNMarkExternalIPsV4Map = "udn-mark-external-ips-v4" nftablesUDNMarkExternalIPsV6Map = "udn-mark-external-ips-v6" - - // outputPortDrop is used to signify that there is no output port for an openflow action and the - // rendered action should result in a drop - outputPortDrop = "output-port-drop" ) // configureUDNServicesNFTables configures the nftables chains, rules, and verdict maps @@ -205,7 +190,7 @@ type nodePortWatcher struct { gatewayIPv6 string gatewayIPLock sync.Mutex ofportPhys string - gwBridge string + gwBridge *bridgeconfig.BridgeConfiguration // Map of service name to programmed iptables/OF rules serviceInfo map[ktypes.NamespacedName]*serviceConfig serviceInfoLock sync.Mutex @@ -231,11 +216,9 @@ type cidrAndFlags struct { validLifetime int } -func (npw *nodePortWatcher) updateGatewayIPs(addressManager *addressManager) { +func (npw *nodePortWatcher) updateGatewayIPs() { // Get Physical IPs of Node, Can be IPV4 IPV6 or both - addressManager.gatewayBridge.Lock() - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(addressManager.gatewayBridge.ips) - addressManager.gatewayBridge.Unlock() + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(npw.gwBridge.GetIPs()) npw.gatewayIPLock.Lock() defer npw.gatewayIPLock.Unlock() @@ -264,7 +247,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI return nil } - var netConfig *bridgeUDNConfiguration + var netConfig *bridgeconfig.BridgeUDNConfiguration var actions string if add { @@ -272,7 +255,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI if netConfig == nil { return fmt.Errorf("failed to get active network config for network %s", netInfo.GetNetworkName()) } - actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) + actions = fmt.Sprintf("output:%s", netConfig.OfPortPatch) } // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure @@ -352,7 +335,7 @@ func (npw *nodePortWatcher) 
updateServiceFlowCache(service *corev1.Service, netI // table=0, matches on return traffic from service nodePort and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, tp_src=%d, "+ "actions=output:%s", - cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)}) + cookie, netConfig.OfPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, svcPort.NodePort, npw.ofportPhys)}) } } } @@ -385,11 +368,11 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI var ofPorts []string // don't get the ports unless we need to as it is a costly operation if (len(extParsedIPs) > 0 || len(ingParsedIPs) > 0) && add { - ofPorts, err = util.GetOpenFlowPorts(npw.gwBridge, false) + ofPorts, err = util.GetOpenFlowPorts(npw.gwBridge.GetGatewayIface(), false) if err != nil { // in the odd case that getting all ports from the bridge should not work, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) - klog.Warningf("Unable to get port list from bridge. Using ovsLocalPort as output only: error: %v", + klog.Warningf("Unable to get port list from bridge. Using OvsLocalPort as output only: error: %v", err) } } @@ -406,9 +389,9 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // Add flows for default network services that are accessible from UDN networks if util.IsNetworkSegmentationSupportEnabled() { - // The flow added below has a higher priority than the per UDN service flow: - // priority=200, table=2, ip, ip_src=169.254.0., actions=set_field:->eth_dst,output: - // This ordering ensures that traffic to UDN allowed default services goes to the the default patch port. 
+ // The flow added below has a higher priority than the per UDN service isolation flow: + // priority=200, table=2, ip, ip_src=169.254.0., actions=drop + // This ordering ensures that traffic to UDN allowed default services goes to the default patch port. if util.IsUDNEnabledService(ktypes.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String()) { key = strings.Join([]string{"UDNAllowedSVC", service.Namespace, service.Name}, "_") @@ -422,14 +405,14 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI ipPrefix = "ipv6" } // table 2, user-defined network host -> OVN towards default cluster network services - defaultNetConfig := npw.ofm.defaultBridge.getActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) + defaultNetConfig := npw.ofm.defaultBridge.GetActiveNetworkBridgeConfigCopy(types.DefaultNetworkName) // sample flow: cookie=0xdeff105, duration=2319.685s, table=2, n_packets=496, n_bytes=67111, priority=300, // ip,nw_dst=10.96.0.1 actions=mod_dl_dst:02:42:ac:12:00:03,output:"patch-breth0_ov" // This flow is used for UDNs and advertised UDNs to be able to reach kapi and dns services alone on default network flows := []string{fmt.Sprintf("cookie=%s, priority=300, table=2, %s, %s_dst=%s, "+ "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, - npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.ofPortPatch)} + nodetypes.DefaultOpenFlowCookie, ipPrefix, ipPrefix, service.Spec.ClusterIP, + npw.ofm.getDefaultBridgeMAC().String(), defaultNetConfig.OfPortPatch)} if util.IsRouteAdvertisementsEnabled() { // if the network is advertised, then for the reply from kapi and dns services to go back // into the UDN's VRF we need flows that statically send this to the local port @@ -442,7 +425,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // sample flow for non-advertised UDNs: cookie=0xdeff105, duration=684.087s, table=0, 
n_packets=0, n_bytes=0, // idle_age=684, priority=500,ip,in_port=2,nw_src=10.96.0.0/16,nw_dst=169.254.0.0/17 actions=ct(table=3,zone=64001,nat) flows = append(flows, fmt.Sprintf("cookie=%s, priority=490, in_port=%s, ip, ip_src=%s,actions=ct(zone=%d,nat,table=3)", - defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) + nodetypes.DefaultOpenFlowCookie, defaultNetConfig.OfPortPatch, service.Spec.ClusterIP, config.Default.HostMasqConntrackZone)) } npw.ofm.updateFlowCacheEntry(key, flows) } @@ -469,7 +452,7 @@ func (npw *nodePortWatcher) updateServiceFlowCache(service *corev1.Service, netI // `actions`: "send to patchport" // `externalIPOrLBIngressIP` is either externalIP.IP or LB.status.ingress.IP // `ipType` is either "External" or "Ingress" -func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, netConfig *bridgeUDNConfiguration, svcPort *corev1.ServicePort, add bool, +func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, netConfig *bridgeconfig.BridgeUDNConfiguration, svcPort *corev1.ServicePort, add bool, hasLocalHostNetworkEp bool, protocol string, actions string, externalIPOrLBIngressIPs []string, ipType string, ofPorts []string) error { for _, externalIPOrLBIngressIP := range externalIPOrLBIngressIPs { @@ -500,7 +483,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, continue } // add the ARP bypass flow regardless of service type or gateway modes since its applicable in all scenarios. - arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.ofPortPatch, externalIPOrLBIngressIP, cookie) + arpFlow := npw.generateARPBypassFlow(ofPorts, netConfig.OfPortPatch, externalIPOrLBIngressIP, cookie) externalIPFlows = append(externalIPFlows, arpFlow) // This allows external traffic ingress when the svc's ExternalTrafficPolicy is // set to Local, and the backend pod is HostNetworked. 
We need to add @@ -537,7 +520,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, etpSvcOpenFlowCookie, npw.ofportPhys)) } else if config.Gateway.Mode == config.GatewayModeShared { // add the ICMP Fragmentation flow for shared gateway mode. - icmpFlow := generateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.ofPortPatch, npw.ofportPhys, cookie, 110) + icmpFlow := nodeutil.GenerateICMPFragmentationFlow(externalIPOrLBIngressIP, netConfig.OfPortPatch, npw.ofportPhys, cookie, 110) externalIPFlows = append(externalIPFlows, icmpFlow) // case2 (see function description for details) externalIPFlows = append(externalIPFlows, @@ -548,7 +531,7 @@ func (npw *nodePortWatcher) createLbAndExternalSvcFlows(service *corev1.Service, // table=0, matches on return traffic from service externalIP or LB ingress and sends it out to primary node interface (br-ex) fmt.Sprintf("cookie=%s, priority=110, in_port=%s, dl_src=%s, %s, %s=%s, tp_src=%d, "+ "actions=output:%s", - cookie, netConfig.ofPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) + cookie, netConfig.OfPortPatch, npw.ofm.getDefaultBridgeMAC(), flowProtocol, nwSrc, externalIPOrLBIngressIP, svcPort.Port, npw.ofportPhys)) } npw.ofm.updateFlowCacheEntry(key, externalIPFlows) } @@ -573,7 +556,7 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, // simply output to LOCAL (this should work well in the vast majority of cases, anyway) arpFlow = fmt.Sprintf("cookie=%s, priority=110, in_port=%s, %s, %s=%s, "+ "actions=output:%s", - cookie, npw.ofportPhys, addrResProto, addrResDst, ipAddr, ovsLocalPort) + cookie, npw.ofportPhys, addrResProto, addrResDst, ipAddr, nodetypes.OvsLocalPort) } else { // cover the case where breth0 has more than 3 ports, e.g. 
if an admin adds a 4th port // and the ExternalIP would be on that port @@ -603,31 +586,6 @@ func (npw *nodePortWatcher) generateARPBypassFlow(ofPorts []string, ofPortPatch, return arpFlow } -func generateICMPFragmentationFlow(ipAddr, outputPort, inPort, cookie string, priority int) string { - // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that - // path MTU discovery continues to work. - icmpMatch := "icmp" - icmpType := 3 - icmpCode := 4 - nwDst := "nw_dst" - if utilnet.IsIPv6String(ipAddr) { - icmpMatch = "icmp6" - icmpType = 2 - icmpCode = 0 - nwDst = "ipv6_dst" - } - - action := fmt.Sprintf("output:%s", outputPort) - if outputPort == outputPortDrop { - action = "drop" - } - - icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=%d, in_port=%s, %s, %s=%s, icmp_type=%d, "+ - "icmp_code=%d, actions=%s", - cookie, priority, inPort, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, action) - return icmpFragmentationFlow -} - // getAndDeleteServiceInfo returns the serviceConfig for a service and if it exists and then deletes the entry func (npw *nodePortWatcher) getAndDeleteServiceInfo(index ktypes.NamespacedName) (out *serviceConfig, exists bool) { npw.serviceInfoLock.Lock() @@ -706,7 +664,7 @@ func addServiceRules(service *corev1.Service, netInfo util.NetInfo, localEndpoin // For dpu or Full mode var err error var errors []error - var activeNetwork *bridgeUDNConfiguration + var activeNetwork *bridgeconfig.BridgeUDNConfiguration if npw != nil { if err = npw.updateServiceFlowCache(service, netInfo, true, svcHasLocalHostNetEndPnt); err != nil { errors = append(errors, err) @@ -1451,913 +1409,6 @@ func (npwipt *nodePortWatcherIptables) SyncServices(services []interface{}) erro return utilerrors.Join(errors...) 
} -func flowsForDefaultBridge(bridge *bridgeConfiguration, extraIPs []net.IP) ([]string, error) { - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure - // that dl_src is included in match criteria! - - ofPortPhys := bridge.ofPortPhys - bridgeMacAddress := bridge.macAddress.String() - ofPortHost := bridge.ofPortHost - bridgeIPs := bridge.ips - - var dftFlows []string - // 14 bytes of overhead for ethernet header (does not include VLAN) - maxPktLength := getMaxFrameLength() - - strip_vlan := "" - mod_vlan_id := "" - match_vlan := "" - if config.Gateway.VLANID != 0 { - strip_vlan = "strip_vlan," - match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) - mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) - } - - if config.IPv4Mode { - // table0, Geneve packets coming from external. Skip conntrack and go directly to host - // if dest mac is the shared mac send directly to host. - if ofPortPhys != "" { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, - ofPortHost)) - // perform NORMAL action otherwise. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ - "actions=NORMAL", defaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) - - // table0, Geneve packets coming from LOCAL/Host OFPort. 
Skip conntrack and go directly to external - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortHost, config.Default.EncapPort, ofPortPhys)) - } - physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) - } - for _, netConfig := range bridge.patchedNetConfigs() { - // table 0, SVC Hairpin from OVN destined to local host, DNAT and go to table 4 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ - "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone, physicalIP.IP)) - } - - // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 - for _, ip := range extraIPs { - if ip.To4() == nil { - continue - } - // not needed for the physical IP - if ip.Equal(physicalIP.IP) { - continue - } - - // not needed for special masquerade IP - if ip.Equal(config.Gateway.MasqueradeIPs.V4HostMasqueradeIP) { - continue - } - - for _, netConfig := range bridge.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s, ip_src=%s,"+ - "actions=ct(commit,zone=%d,table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, ip.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone)) - } - } - - // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ip, ip_dst=%s,"+ - "actions=ct(zone=%d,nat,table=5)", - defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) - } - if config.IPv6Mode { - if ofPortPhys != 
"" { - // table0, Geneve packets coming from external. Skip conntrack and go directly to host - // if dest mac is the shared mac send directly to host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=205, in_port=%s, dl_dst=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortPhys, bridgeMacAddress, config.Default.EncapPort, - ofPortHost)) - // perform NORMAL action otherwise. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=NORMAL", defaultOpenFlowCookie, ofPortPhys, config.Default.EncapPort)) - - // table0, Geneve packets coming from LOCAL. Skip conntrack and send to external - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, in_port=%s, udp6, udp_dst=%d, "+ - "actions=output:%s", defaultOpenFlowCookie, ovsLocalPort, config.Default.EncapPort, ofPortPhys)) - } - - physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) - } - // table 0, SVC Hairpin from OVN destined to local host, DNAT to host, send to table 4 - for _, netConfig := range bridge.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ - "actions=ct(commit,zone=%d,nat(dst=%s),table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone, physicalIP.IP)) - } - - // table 0, hairpin from OVN destined to local host (but an additional node IP), send to table 4 - for _, ip := range extraIPs { - if ip.To4() != nil { - continue - } - // not needed for the physical IP - if ip.Equal(physicalIP.IP) { - continue - } - - // not needed for special masquerade IP - if ip.Equal(config.Gateway.MasqueradeIPs.V6HostMasqueradeIP) { - continue - } - - for _, netConfig := range bridge.patchedNetConfigs() { 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s, ipv6_src=%s,"+ - "actions=ct(commit,zone=%d,table=4)", - defaultOpenFlowCookie, netConfig.ofPortPatch, ip.String(), physicalIP.IP, - config.Default.HostMasqConntrackZone)) - } - } - - // table 0, Reply SVC traffic from Host -> OVN, unSNAT and goto table 5 - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, ipv6, ipv6_dst=%s,"+ - "actions=ct(zone=%d,nat,table=5)", - defaultOpenFlowCookie, ofPortHost, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String(), config.Default.OVNMasqConntrackZone)) - } - - var protoPrefix, masqIP, masqSubnet string - - // table 0, packets coming from Host -> Service - for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { - if utilnet.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" - masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() - masqSubnet = config.Gateway.V4MasqueradeSubnet - } else { - protoPrefix = "ipv6" - masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() - masqSubnet = config.Gateway.V6MasqueradeSubnet - } - - // table 0, Host (default network) -> OVN towards SVC, SNAT to special IP. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,nat(src=%s),table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - svcCIDR, config.Default.HostMasqConntrackZone, masqIP)) - - if util.IsNetworkSegmentationSupportEnabled() { - // table 0, Host (UDNs) -> OVN towards SVC, SNAT to special IP. - // For packets originating from UDN, commit without NATing, those - // have already been SNATed to the masq IP of the UDN. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) - if util.IsRouteAdvertisementsEnabled() { - // If the UDN is advertised then instead of matching on the masqSubnet - // we match on the UDNPodSubnet itself and we also don't SNAT to 169.254.0.2 - // sample flow: cookie=0xdeff105, duration=1472.742s, table=0, n_packets=9, n_bytes=666, priority=550 - // ip,in_port=LOCAL,nw_src=103.103.0.0/16,nw_dst=10.96.0.0/16 actions=ct(commit,table=2,zone=64001) - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.isDefaultNetwork() { - continue - } - if netConfig.advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(svcCIDR), udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine UDN subnet for the provided family isIPV6: %t, %v", utilnet.IsIPv6CIDR(svcCIDR), err) - continue - } - - // Use the filtered subnet for the flow compute instead of the masqueradeIP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, "+ - "actions=ct(commit,zone=%d,table=2)", - defaultOpenFlowCookie, ofPortHost, protoPrefix, protoPrefix, - matchingIPFamilySubnet.String(), protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) - } - } - } - } - - masqDst := masqIP - if util.IsNetworkSegmentationSupportEnabled() { - // In UDN match on the whole masquerade subnet to handle replies from UDN enabled services - 
masqDst = masqSubnet - } - for _, netConfig := range bridge.patchedNetConfigs() { - // table 0, Reply hairpin traffic to host, coming from OVN, unSNAT - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=500, in_port=%s, %s, %s_src=%s, %s_dst=%s,"+ - "actions=ct(zone=%d,nat,table=3)", - defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR, - protoPrefix, masqDst, config.Default.HostMasqConntrackZone)) - // table 0, Reply traffic coming from OVN to outside, drop it if the DNAT wasn't done either - // at the GR load balancer or switch load balancer. It means the correct port wasn't provided. - // nodeCIDR->serviceCIDR traffic flow is internal and it shouldn't be carried to outside the cluster - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, %s, %s_dst=%s,"+ - "actions=drop", defaultOpenFlowCookie, netConfig.ofPortPatch, protoPrefix, protoPrefix, svcCIDR)) - } - } - - // table 0, add IP fragment reassembly flows, only needed in SGW mode with - // physical interface attached to bridge - if config.Gateway.Mode == config.GatewayModeShared && ofPortPhys != "" { - reassemblyFlows := generateIPFragmentReassemblyFlow(ofPortPhys) - dftFlows = append(dftFlows, reassemblyFlows...) 
- } - if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { - var actions string - if config.Gateway.Mode != config.GatewayModeLocal || config.Gateway.DisablePacketMTUCheck { - actions = fmt.Sprintf("output:%s", netConfig.ofPortPatch) - } else { - // packets larger than known acceptable MTU need to go to kernel for - // potential fragmentation - // introduced specifically for replies to egress traffic not routed - // through the host - actions = fmt.Sprintf("check_pkt_larger(%d)->reg0[0],resubmit(,11)", maxPktLength) - } - - if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkOVN go to OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ip, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) - - } - - if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkOVN go to OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, ipv6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%s", defaultOpenFlowCookie, netConfig.masqCTMark, actions)) - } - } - if config.IPv4Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkHost go to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip, 
ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - } - if config.IPv6Mode { - // table 1, established and related connections in zone 64000 with ct_mark ctMarkHost go to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+est, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=1, %s ip6, ct_state=+trk+rel, ct_mark=%s, "+ - "actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, ctMarkHost, strip_vlan, ofPortHost)) - - } - - // table 1, we check to see if this dest mac is the shared mac, if so send to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - } - - defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] - - // table 2, dispatch from Host -> OVN - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, table=2, "+ - "actions=set_field:%s->eth_dst,%soutput:%s", defaultOpenFlowCookie, - bridgeMacAddress, mod_vlan_id, defaultNetConfig.ofPortPatch)) - - // table 2, priority 200, dispatch from UDN -> Host -> OVN. These packets have - // already been SNATed to the UDN's masq IP or have been marked with the UDN's packet mark. 
- if config.IPv4Mode { - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.isDefaultNetwork() { - continue - } - srcIPOrSubnet := netConfig.v4MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine IPV4 UDN subnet for the provided family isIPV6: %v", err) - continue - } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() - } - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, ip_src=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, srcIPOrSubnet, - bridgeMacAddress, netConfig.ofPortPatch)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip, pkt_mark=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, netConfig.pktMark, - bridgeMacAddress, netConfig.ofPortPatch)) - } - } - - if config.IPv6Mode { - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.isDefaultNetwork() { - continue - } - srcIPOrSubnet := netConfig.v6MasqIPs.ManagementPort.IP.String() - if util.IsRouteAdvertisementsEnabled() && netConfig.advertised.Load() { - var udnAdvertisedSubnets []*net.IPNet - for _, clusterEntry := range netConfig.subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - // Filter subnets based on the clusterIP service family - // NOTE: We don't support more than 1 subnet CIDR of same family type; we only pick 
the first one - matchingIPFamilySubnet, err := util.MatchFirstIPNetFamily(true, udnAdvertisedSubnets) - if err != nil { - klog.Infof("Unable to determine IPV6 UDN subnet for the provided family isIPV6: %v", err) - continue - } - - // Use the filtered subnets for the flow compute instead of the masqueradeIP - srcIPOrSubnet = matchingIPFamilySubnet.String() - } - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, ipv6_src=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, srcIPOrSubnet, - bridgeMacAddress, netConfig.ofPortPatch)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=200, table=2, ip6, pkt_mark=%s, "+ - "actions=set_field:%s->eth_dst,output:%s", - defaultOpenFlowCookie, netConfig.pktMark, - bridgeMacAddress, netConfig.ofPortPatch)) - } - } - - // table 3, dispatch from OVN -> Host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=3, %s "+ - "actions=move:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[],set_field:%s->eth_dst,%soutput:%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - - // table 4, hairpinned pkts that need to go from OVN -> Host - // We need to SNAT and masquerade OVN GR IP, send to table 3 for dispatch to Host - if config.IPv4Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ip,"+ - "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - defaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V4OVNMasqueradeIP.String())) - } - if config.IPv6Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=4,ipv6, "+ - "actions=ct(commit,zone=%d,nat(src=%s),table=3)", - defaultOpenFlowCookie, config.Default.OVNMasqConntrackZone, config.Gateway.MasqueradeIPs.V6OVNMasqueradeIP.String())) - } - // table 5, Host Reply traffic to hairpinned svc, need to unDNAT, send to table 2 - if config.IPv4Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ip, "+ - 
"actions=ct(commit,zone=%d,nat,table=2)", - defaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) - } - if config.IPv6Mode { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, table=5, ipv6, "+ - "actions=ct(commit,zone=%d,nat,table=2)", - defaultOpenFlowCookie, config.Default.HostMasqConntrackZone)) - } - return dftFlows, nil -} - -func commonFlows(hostSubnets []*net.IPNet, bridge *bridgeConfiguration) ([]string, error) { - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure - // that dl_src is included in match criteria! - ofPortPhys := bridge.ofPortPhys - bridgeMacAddress := bridge.macAddress.String() - ofPortHost := bridge.ofPortHost - bridgeIPs := bridge.ips - - var dftFlows []string - - strip_vlan := "" - match_vlan := "" - mod_vlan_id := "" - if config.Gateway.VLANID != 0 { - strip_vlan = "strip_vlan," - match_vlan = fmt.Sprintf("dl_vlan=%d,", config.Gateway.VLANID) - mod_vlan_id = fmt.Sprintf("mod_vlan_vid:%d,", config.Gateway.VLANID) - } - - if ofPortPhys != "" { - // table 0, we check to see if this dest mac is the shared mac, if so flood to all ports - actions := "" - for _, netConfig := range bridge.patchedNetConfigs() { - actions += "output:" + netConfig.ofPortPatch + "," - } - actions += strip_vlan + "output:" + ofPortHost - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, %s dl_dst=%s, actions=%s", - defaultOpenFlowCookie, ofPortPhys, match_vlan, bridgeMacAddress, actions)) - } - - // table 0, check packets coming from OVN have the correct mac address. Low priority flows that are a catch all - // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). 
- for _, netConfig := range bridge.patchedNetConfigs() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - defaultOpenFlowCookie, netConfig.ofPortPatch)) - } - - if config.IPv4Mode { - physicalIP, err := util.MatchFirstIPNetFamily(false, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv4 physical IP of host: %v", err) - } - if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { - // table0, packets coming from egressIP pods that have mark 1008 on them - // will be SNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR - // SNATs these into egressIP prior to reaching external bridge. - // egressService pods will also undergo this SNAT to nodeIP since these features are tied - // together at the OVN policy level on the distributed router. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%s "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) - - // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to - // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. 
- if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil { - if netConfig.masqCTMark != ctMarkOVN { - for mark, eip := range bridge.eipMarkIPs.GetIPv4() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ip, pkt_mark=%d, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys)) - } - } - } - - // table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN - // so that reverse direction goes back to the pods. - if netConfig.isDefaultNetwork() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, - netConfig.masqCTMark, ofPortPhys)) - - // Allow OVN->Host traffic on the same node - if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, ovnToHostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, false)...) 
- } - } else { - // for UDN we additionally SNAT the packet from masquerade IP -> node IP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ip, ip_src=%s, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v4MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) - } - } - - // table 0, packets coming from host Commit connections with ct_mark ctMarkHost - // so that reverse direction goes back to the host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ip, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) - } - if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.patchedNetConfigs() { - // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. - // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp, nw_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - // We send BFD traffic coming from OVN to outside directly using a higher priority flow - if ofPortPhys != "" { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp, tp_dst=3784, actions=output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ofPortPhys)) - } - } - } - - if ofPortPhys != "" { - // table 0, packets coming from external. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ip, "+ - "actions=ct(zone=%d, nat, table=1)", defaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) - } - } - - if config.IPv6Mode { - physicalIP, err := util.MatchFirstIPNetFamily(true, bridgeIPs) - if err != nil { - return nil, fmt.Errorf("unable to determine IPv6 physical IP of host: %v", err) - } - if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { - // table0, packets coming from egressIP pods that have mark 1008 on them - // will be DNAT-ed a final time into nodeIP to maintain consistency in traffic even if the GR - // DNATs these into egressIP prior to reaching external bridge. 
- // egressService pods will also undergo this SNAT to nodeIP since these features are tied - // together at the OVN policy level on the distributed router. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%s "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)),output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ovnKubeNodeSNATMark, - config.Default.ConntrackZone, physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) - - // table 0, packets coming from egressIP pods only from user defined networks. If an egressIP is assigned to - // this node, then all networks get a flow even if no pods on that network were selected for by this egressIP. - if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && - config.Gateway.Mode != config.GatewayModeDisabled && bridge.eipMarkIPs != nil { - if netConfig.masqCTMark != ctMarkOVN { - for mark, eip := range bridge.eipMarkIPs.GetIPv6() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=105, in_port=%s, dl_src=%s, ipv6, pkt_mark=%d, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, mark, - config.Default.ConntrackZone, eip, netConfig.masqCTMark, ofPortPhys)) - } - } - } - - // table 0, packets coming from pods headed externally. Commit connections with ct_mark ctMarkOVN - // so that reverse direction goes back to the pods. 
- if netConfig.isDefaultNetwork() { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, config.Default.ConntrackZone, netConfig.masqCTMark, ofPortPhys)) - - // Allow OVN->Host traffic on the same node - if config.Gateway.Mode == config.GatewayModeShared || config.Gateway.Mode == config.GatewayModeLocal { - dftFlows = append(dftFlows, ovnToHostNetworkNormalActionFlows(netConfig, bridgeMacAddress, hostSubnets, true)...) - } - } else { - // for UDN we additionally SNAT the packet from masquerade IP -> node IP - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, dl_src=%s, ipv6, ipv6_src=%s, "+ - "actions=ct(commit, zone=%d, nat(src=%s), exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, netConfig.v6MasqIPs.GatewayRouter.IP, config.Default.ConntrackZone, - physicalIP.IP, netConfig.masqCTMark, ofPortPhys)) - } - } - - // table 0, packets coming from host. Commit connections with ct_mark ctMarkHost - // so that reverse direction goes back to the host. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=100, in_port=%s, ipv6, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), %soutput:%s", - defaultOpenFlowCookie, ofPortHost, config.Default.ConntrackZone, ctMarkHost, mod_vlan_id, ofPortPhys)) - - } - if config.Gateway.Mode == config.GatewayModeLocal { - for _, netConfig := range bridge.patchedNetConfigs() { - // table 0, any packet coming from OVN send to host in LGW mode, host will take care of sending it outside if needed. - // exceptions are traffic for egressIP and egressGW features and ICMP related traffic which will hit the priority 100 flow instead of this. 
- dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, tcp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, udp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=175, in_port=%s, sctp6, ipv6_src=%s, "+ - "actions=ct(table=4,zone=%d)", - defaultOpenFlowCookie, netConfig.ofPortPatch, physicalIP.IP, config.Default.HostMasqConntrackZone)) - if ofPortPhys != "" { - // We send BFD traffic coming from OVN to outside directly using a higher priority flow - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=650, table=0, in_port=%s, dl_src=%s, udp6, tp_dst=3784, actions=output:%s", - defaultOpenFlowCookie, netConfig.ofPortPatch, bridgeMacAddress, ofPortPhys)) - } - } - } - if ofPortPhys != "" { - // table 0, packets coming from external. Send it through conntrack and - // resubmit to table 1 to know the state and mark of the connection. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=50, in_port=%s, ipv6, "+ - "actions=ct(zone=%d, nat, table=1)", defaultOpenFlowCookie, ofPortPhys, config.Default.ConntrackZone)) - } - } - // Egress IP is often configured on a node different from the one hosting the affected pod. - // Due to the fact that ovn-controllers on different nodes apply the changes independently, - // there is a chance that the pod traffic will reach the egress node before it configures the SNAT flows. 
- // Drop pod traffic that is not SNATed, excluding local pods(required for ICNIv2) - defaultNetConfig := bridge.netConfig[types.DefaultNetworkName] - if config.OVNKubernetesFeature.EnableEgressIP { - for _, clusterEntry := range config.Default.ClusterSubnets { - cidr := clusterEntry.CIDR - ipv := getIPv(cidr) - // table 0, drop packets coming from pods headed externally that were not SNATed. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=104, in_port=%s, %s, %s_src=%s, actions=drop", - defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, ipv, ipv, cidr)) - } - for _, subnet := range defaultNetConfig.nodeSubnets { - ipv := getIPv(subnet) - if ofPortPhys != "" { - // table 0, commit connections from local pods. - // ICNIv2 requires that local pod traffic can leave the node without SNAT. - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=109, in_port=%s, dl_src=%s, %s, %s_src=%s"+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:%s", - defaultOpenFlowCookie, defaultNetConfig.ofPortPatch, bridgeMacAddress, ipv, ipv, subnet, - config.Default.ConntrackZone, ctMarkOVN, ofPortPhys)) - } - } - } - - if ofPortPhys != "" { - for _, netConfig := range bridge.patchedNetConfigs() { - isNetworkAdvertised := netConfig.advertised.Load() - // disableSNATMultipleGWs only applies to default network - disableSNATMultipleGWs := netConfig.isDefaultNetwork() && config.Gateway.DisableSNATMultipleGWs - if !disableSNATMultipleGWs && !isNetworkAdvertised { - continue - } - output := netConfig.ofPortPatch - if isNetworkAdvertised && config.Gateway.Mode == config.GatewayModeLocal { - // except if advertised through BGP, go to kernel - // TODO: MEG enabled pods should still go through the patch port - // but holding this until - // https://issues.redhat.com/browse/FDP-646 is fixed, for now we - // are assuming MEG & BGP are not used together - output = ovsLocalPort - } - for _, clusterEntry := range netConfig.subnets { - cidr := 
clusterEntry.CIDR - ipv := getIPv(cidr) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=15, table=1, %s, %s_dst=%s, "+ - "actions=output:%s", - defaultOpenFlowCookie, ipv, ipv, cidr, output)) - } - if output == netConfig.ofPortPatch { - // except node management traffic - for _, subnet := range netConfig.nodeSubnets { - mgmtIP := util.GetNodeManagementIfAddr(subnet) - ipv := getIPv(mgmtIP) - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=16, table=1, %s, %s_dst=%s, "+ - "actions=output:%s", - defaultOpenFlowCookie, ipv, ipv, mgmtIP.IP, ovsLocalPort), - ) - } - } - } - - // table 1, we check to see if this dest mac is the shared mac, if so send to host - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=1, %s dl_dst=%s, actions=%soutput:%s", - defaultOpenFlowCookie, match_vlan, bridgeMacAddress, strip_vlan, ofPortHost)) - - if config.IPv6Mode { - // REMOVEME(trozet) when https://bugzilla.kernel.org/show_bug.cgi?id=11797 is resolved - // must flood icmpv6 Route Advertisement and Neighbor Advertisement traffic as it fails to create a CT entry - for _, icmpType := range []int{types.RouteAdvertisementICMPType, types.NeighborAdvertisementICMPType} { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=14, table=1,icmp6,icmpv6_type=%d actions=FLOOD", - defaultOpenFlowCookie, icmpType)) - } - if ofPortPhys != "" { - // We send BFD traffic both on the host and in ovn - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp6, tp_dst=3784, actions=output:%s,output:%s", - defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.ofPortPatch, ofPortHost)) - } - } - - if config.IPv4Mode { - if ofPortPhys != "" { - // We send BFD traffic both on the host and in ovn - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=13, table=1, in_port=%s, udp, tp_dst=3784, actions=output:%s,output:%s", - defaultOpenFlowCookie, ofPortPhys, defaultNetConfig.ofPortPatch, ofPortHost)) - } 
- } - - // packets larger than known acceptable MTU need to go to kernel for - // potential fragmentation - // introduced specifically for replies to egress traffic not routed - // through the host - if config.Gateway.Mode == config.GatewayModeLocal && !config.Gateway.DisablePacketMTUCheck { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=10, table=11, reg0=0x1, "+ - "actions=output:%s", defaultOpenFlowCookie, ofPortHost)) - - // Send UDN destined traffic to right patch port - for _, netConfig := range bridge.patchedNetConfigs() { - if netConfig.masqCTMark != ctMarkOVN { - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=5, table=11, ct_mark=%s, "+ - "actions=output:%s", defaultOpenFlowCookie, netConfig.masqCTMark, netConfig.ofPortPatch)) - } - } - - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=1, table=11, "+ - "actions=output:%s", defaultOpenFlowCookie, defaultNetConfig.ofPortPatch)) - } - - // table 1, all other connections do normal processing - dftFlows = append(dftFlows, - fmt.Sprintf("cookie=%s, priority=0, table=1, actions=output:NORMAL", defaultOpenFlowCookie)) - } - - return dftFlows, nil -} - -func pmtudDropFlows(bridge *bridgeConfiguration, ipAddrs []string) []string { - var flows []string - if config.Gateway.Mode != config.GatewayModeShared { - return nil - } - for _, addr := range ipAddrs { - for _, netConfig := range bridge.patchedNetConfigs() { - flows = append(flows, - generateICMPFragmentationFlow(addr, outputPortDrop, netConfig.ofPortPatch, pmtudOpenFlowCookie, 700)) - } - } - - return flows -} - -// ovnToHostNetworkNormalActionFlows returns the flows that allow IP{v4,v6} traffic from the OVN network to the host network -// when the destination is on the same node as the sender. This is necessary for pods in the default network to reach -// localnet pods on the same node, when the localnet is mapped to breth0. 
The expected srcMAC is the MAC address of breth0 -// and the expected hostSubnets is the host subnets found on the node primary interface. -func ovnToHostNetworkNormalActionFlows(netConfig *bridgeUDNConfiguration, srcMAC string, hostSubnets []*net.IPNet, isV6 bool) []string { - var inPort, ctMark, ipFamily, ipFamilyDest string - var flows []string - - if config.Gateway.Mode == config.GatewayModeShared { - inPort = netConfig.ofPortPatch - ctMark = netConfig.masqCTMark - } else if config.Gateway.Mode == config.GatewayModeLocal { - inPort = "LOCAL" - ctMark = ctMarkHost - } else { - return nil - } - - if isV6 { - ipFamily = "ipv6" - ipFamilyDest = "ipv6_dst" - } else { - ipFamily = "ip" - ipFamilyDest = "nw_dst" - } - - for _, hostSubnet := range hostSubnets { - if (hostSubnet.IP.To4() == nil) != isV6 { - continue - } - // IP traffic from the OVN network to the host network should be handled normally by the bridge instead of - // being output directly to the NIC by the existing flow at prio=100. - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=102, in_port=%s, dl_src=%s, %s, %s=%s, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL", - defaultOpenFlowCookie, - inPort, - srcMAC, - ipFamily, - ipFamilyDest, - hostSubnet.String(), - config.Default.ConntrackZone, - ctMark)) - } - - if isV6 { - // Neighbor discovery in IPv6 happens through ICMPv6 messages to a special destination (ff02::1:ff00:0/104), - // which has nothing to do with the host subnets we're matching against in the flow above at prio=102. - // Let's allow neighbor discovery by matching against icmp type and in_port. 
- for _, icmpType := range []int{types.NeighborSolicitationICMPType, types.NeighborAdvertisementICMPType} { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=102, in_port=%s, dl_src=%s, icmp6, icmpv6_type=%d, "+ - "actions=ct(commit, zone=%d, exec(set_field:%s->ct_mark)), output:NORMAL", - defaultOpenFlowCookie, inPort, srcMAC, icmpType, - config.Default.ConntrackZone, ctMark)) - } - } - - return flows -} - -func setBridgeOfPorts(bridge *bridgeConfiguration) error { - bridge.Lock() - defer bridge.Unlock() - // Get ofport of patchPort - for _, netConfig := range bridge.netConfig { - if err := netConfig.setBridgeNetworkOfPortsInternal(); err != nil { - return fmt.Errorf("error setting bridge openflow ports for network with patchport %v: err: %v", netConfig.patchPort, err) - } - } - - if bridge.uplinkName != "" { - // Get ofport of physical interface - ofportPhys, stderr, err := util.GetOVSOfPort("get", "interface", bridge.uplinkName, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - bridge.uplinkName, stderr, err) - } - bridge.ofPortPhys = ofportPhys - } - - // Get ofport representing the host. That is, host representor port in case of DPUs, ovsLocalPort otherwise. 
- if config.OvnKubeNode.Mode == types.NodeModeDPU { - var stderr string - hostRep, err := util.GetDPUHostInterface(bridge.bridgeName) - if err != nil { - return err - } - - bridge.ofPortHost, stderr, err = util.RunOVSVsctl("get", "interface", hostRep, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of host interface %s, stderr: %q, error: %v", - hostRep, stderr, err) - } - } else { - var err error - if bridge.gwIfaceRep != "" { - bridge.ofPortHost, _, err = util.RunOVSVsctl("get", "interface", bridge.gwIfaceRep, "ofport") - if err != nil { - return fmt.Errorf("failed to get ofport of bypass rep %s, error: %v", bridge.gwIfaceRep, err) - } - } else { - bridge.ofPortHost = ovsLocalPort - } - } - - return nil -} - func newGateway( nodeName string, subnets []*net.IPNet, @@ -2374,7 +1425,9 @@ func newGateway( gatewayMode config.GatewayMode, ) (*gateway, error) { klog.Info("Creating new gateway") - gw := &gateway{} + gw := &gateway{ + nextHops: gwNextHops, + } if gatewayMode == config.GatewayModeLocal { if err := initLocalGateway(subnets, mgmtPort); err != nil { @@ -2391,37 +1444,19 @@ func newGateway( if exGwBridge != nil { gw.readyFunc = func() (bool, error) { - gwBridge.Lock() - for _, netConfig := range gwBridge.netConfig { - ready, err := gatewayReady(netConfig.patchPort) - if err != nil || !ready { - gwBridge.Unlock() - return false, err - } + if !gwBridge.IsGatewayReady() { + return false, nil } - gwBridge.Unlock() - exGwBridge.Lock() - for _, netConfig := range exGwBridge.netConfig { - exGWReady, err := gatewayReady(netConfig.patchPort) - if err != nil || !exGWReady { - exGwBridge.Unlock() - return false, err - } + if !exGwBridge.IsGatewayReady() { + return false, nil } - exGwBridge.Unlock() return true, nil } } else { gw.readyFunc = func() (bool, error) { - gwBridge.Lock() - for _, netConfig := range gwBridge.netConfig { - ready, err := gatewayReady(netConfig.patchPort) - if err != nil || !ready { - gwBridge.Unlock() - return false, err - } 
+ if !gwBridge.IsGatewayReady() { + return false, nil } - gwBridge.Unlock() return true, nil } } @@ -2436,19 +1471,19 @@ func newGateway( // Program cluster.GatewayIntf to let non-pod traffic to go to host // stack klog.Info("Creating Gateway Openflow Manager") - err := setBridgeOfPorts(gwBridge) + err := gwBridge.SetOfPorts() if err != nil { return err } if exGwBridge != nil { - err = setBridgeOfPorts(exGwBridge) + err = exGwBridge.SetOfPorts() if err != nil { return err } } if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { - gw.bridgeEIPAddrManager = newBridgeEIPAddrManager(nodeName, gwBridge.bridgeName, linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) - gwBridge.eipMarkIPs = gw.bridgeEIPAddrManager.GetCache() + gw.bridgeEIPAddrManager = egressip.NewBridgeEIPAddrManager(nodeName, gwBridge.GetBridgeName(), linkManager, kube, watchFactory.EgressIPInformer(), watchFactory.NodeCoreInformer()) + gwBridge.SetEIPMarkIPs(gw.bridgeEIPAddrManager.GetCache()) } gw.nodeIPManager = newAddressManager(nodeName, kube, mgmtPort, watchFactory, gwBridge) @@ -2456,15 +1491,15 @@ func newGateway( // Delete stale masquerade resources if there are any. This is to make sure that there // are no Linux resources with IP from old masquerade subnet when masquerade subnet // gets changed as part of day2 operation. 
- if err := deleteStaleMasqueradeResources(gwBridge.getGatewayIface(), nodeName, watchFactory); err != nil { + if err := deleteStaleMasqueradeResources(gwBridge.GetGatewayIface(), nodeName, watchFactory); err != nil { return fmt.Errorf("failed to remove stale masquerade resources: %w", err) } - if err := setNodeMasqueradeIPOnExtBridge(gwBridge.getGatewayIface()); err != nil { - return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.getGatewayIface(), err) + if err := setNodeMasqueradeIPOnExtBridge(gwBridge.GetGatewayIface()); err != nil { + return fmt.Errorf("failed to set the node masquerade IP on the ext bridge %s: %v", gwBridge.GetGatewayIface(), err) } - if err := addMasqueradeRoute(routeManager, gwBridge.getGatewayIface(), nodeName, gwIPs, watchFactory); err != nil { + if err := addMasqueradeRoute(routeManager, gwBridge.GetGatewayIface(), nodeName, gwIPs, watchFactory); err != nil { return fmt.Errorf("failed to set the node masquerade route to OVN: %v", err) } @@ -2488,7 +1523,7 @@ func newGateway( } if gw.nodePortWatcher != nil { npw, _ := gw.nodePortWatcher.(*nodePortWatcher) - npw.updateGatewayIPs(gw.nodeIPManager) + npw.updateGatewayIPs() } // Services create OpenFlow flows as well, need to update them all if gw.servicesRetryFramework != nil { @@ -2511,7 +1546,7 @@ func newGateway( gw.openflowManager.requestFlowSync() } - if err := addHostMACBindings(gwBridge.getGatewayIface()); err != nil { + if err := addHostMACBindings(gwBridge.GetGatewayIface()); err != nil { return fmt.Errorf("failed to add MAC bindings for service routing: %w", err) } @@ -2523,7 +1558,7 @@ func newGateway( } func newNodePortWatcher( - gwBridge *bridgeConfiguration, + gwBridge *bridgeconfig.BridgeConfiguration, ofm *openflowManager, nodeIPManager *addressManager, watchFactory factory.NodeWatchFactory, @@ -2532,10 +1567,10 @@ func newNodePortWatcher( // Get ofport of physical interface ofportPhys, stderr, err := util.GetOVSOfPort("--if-exists", 
"get", - "interface", gwBridge.uplinkName, "ofport") + "interface", gwBridge.GetUplinkName(), "ofport") if err != nil { return nil, fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - gwBridge.uplinkName, stderr, err) + gwBridge.GetUplinkName(), stderr, err) } // In the shared gateway mode, the NodePort service is handled by the OpenFlow flows configured @@ -2573,11 +1608,11 @@ func newNodePortWatcher( subnets = append(subnets, config.Kubernetes.ServiceCIDRs...) if config.Gateway.DisableForwarding { if err := initExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.getGatewayIface(), err) + return nil, fmt.Errorf("failed to add accept rules in forwarding table for bridge %s: err %v", gwBridge.GetGatewayIface(), err) } } else { if err := delExternalBridgeServiceForwardingRules(subnets); err != nil { - return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.getGatewayIface(), err) + return nil, fmt.Errorf("failed to delete accept rules in forwarding table for bridge %s: err %v", gwBridge.GetGatewayIface(), err) } } @@ -2588,14 +1623,14 @@ func newNodePortWatcher( } // Get Physical IPs of Node, Can be IPV4 IPV6 or both - gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.ips) + gatewayIPv4, gatewayIPv6 := getGatewayFamilyAddrs(gwBridge.GetIPs()) npw := &nodePortWatcher{ dpuMode: dpuMode, gatewayIPv4: gatewayIPv4, gatewayIPv6: gatewayIPv6, ofportPhys: ofportPhys, - gwBridge: gwBridge.getGatewayIface(), + gwBridge: gwBridge, serviceInfo: make(map[ktypes.NamespacedName]*serviceConfig), nodeIPManager: nodeIPManager, ofm: ofm, @@ -2856,36 +1891,6 @@ func updateMasqueradeAnnotation(nodeName string, kube kube.Interface) error { return nil } -// generateIPFragmentReassemblyFlow adds flows in table 0 that send packets to a -// specific conntrack zone for reassembly with the same priority as 
node port -// flows that match on L4 fields. After reassembly packets are reinjected to -// table 0 again. This requires a conntrack immplementation that reassembles -// fragments. This reqreuiment is met for the kernel datapath with the netfilter -// module loaded. This reqreuiment is not met for the userspace datapath. -func generateIPFragmentReassemblyFlow(ofPortPhys string) []string { - flows := make([]string, 0, 2) - if config.IPv4Mode { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ip, nw_frag=yes, actions=ct(table=0,zone=%d)", - defaultOpenFlowCookie, - ofPortPhys, - config.Default.ReassemblyConntrackZone, - ), - ) - } - if config.IPv6Mode { - flows = append(flows, - fmt.Sprintf("cookie=%s, priority=110, table=0, in_port=%s, ipv6, nw_frag=yes, actions=ct(table=0,zone=%d)", - defaultOpenFlowCookie, - ofPortPhys, - config.Default.ReassemblyConntrackZone, - ), - ) - } - - return flows -} - // deleteStaleMasqueradeResources removes stale Linux resources when config.Gateway.V4MasqueradeSubnet // or config.Gateway.V6MasqueradeSubnet gets changed at day 2. func deleteStaleMasqueradeResources(bridgeName, nodeName string, wf factory.NodeWatchFactory) error { @@ -3019,14 +2024,6 @@ func deleteMasqueradeResources(link netlink.Link, staleMasqueradeIPs *config.Mas return utilerrors.Join(aggregatedErrors...) } -func getIPv(ipnet *net.IPNet) string { - prefix := "ip" - if utilnet.IsIPv6CIDR(ipnet) { - prefix = "ipv6" - } - return prefix -} - // configureAdvertisedUDNIsolationNFTables configures nftables to drop traffic generated locally towards advertised UDN subnets. // It sets up a nftables chain named nftablesUDNBGPOutputChain in the output hook with filter priority which drops // traffic originating from the local node destined to nftablesAdvertisedUDNsSet. 
@@ -3046,8 +2043,8 @@ func getIPv(ipnet *net.IPNet) string { // chain udn-bgp-drop { // comment "Drop traffic generated locally towards advertised UDN subnets" // type filter hook output priority filter; policy accept; -// ip daddr @advertised-udn-subnets-v4 counter packets 0 bytes 0 drop -// ip6 daddr @advertised-udn-subnets-v6 counter packets 0 bytes 0 drop +// ct state new ip daddr @advertised-udn-subnets-v4 counter packets 0 bytes 0 drop +// ct state new ip6 daddr @advertised-udn-subnets-v6 counter packets 0 bytes 0 drop // } func configureAdvertisedUDNIsolationNFTables() error { counterIfDebug := "" @@ -3089,11 +2086,11 @@ func configureAdvertisedUDNIsolationNFTables() error { tx.Add(&knftables.Rule{ Chain: nftablesUDNBGPOutputChain, - Rule: knftables.Concat(fmt.Sprintf("ip daddr @%s", nftablesAdvertisedUDNsSetV4), counterIfDebug, "drop"), + Rule: knftables.Concat("ct state new", fmt.Sprintf("ip daddr @%s", nftablesAdvertisedUDNsSetV4), counterIfDebug, "drop"), }) tx.Add(&knftables.Rule{ Chain: nftablesUDNBGPOutputChain, - Rule: knftables.Concat(fmt.Sprintf("ip6 daddr @%s", nftablesAdvertisedUDNsSetV6), counterIfDebug, "drop"), + Rule: knftables.Concat("ct state new", fmt.Sprintf("ip6 daddr @%s", nftablesAdvertisedUDNsSetV6), counterIfDebug, "drop"), }) return nft.Run(context.TODO(), tx) } diff --git a/go-controller/pkg/node/gateway_udn.go b/go-controller/pkg/node/gateway_udn.go index 7b755806fd..f7d2e27a01 100644 --- a/go-controller/pkg/node/gateway_udn.go +++ b/go-controller/pkg/node/gateway_udn.go @@ -5,7 +5,7 @@ import ( "fmt" "net" "slices" - "sync/atomic" + "strings" "time" "github.com/vishvananda/netlink" @@ -91,149 +91,6 @@ type UserDefinedNetworkGateway struct { gwInterfaceIndex int } -// UTILS Needed for UDN (also leveraged for default netInfo) in bridgeConfiguration - -// getBridgePortConfigurations returns a slice of Network port configurations along with the -// uplinkName and physical port's ofport value -func (b *bridgeConfiguration) 
getBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { - b.Lock() - defer b.Unlock() - var netConfigs []*bridgeUDNConfiguration - for _, netConfig := range b.netConfig { - netConfigs = append(netConfigs, netConfig.shallowCopy()) - } - return netConfigs, b.uplinkName, b.ofPortPhys -} - -// addNetworkBridgeConfig adds the patchport and ctMark value for the provided netInfo into the bridge configuration cache -func (b *bridgeConfiguration) addNetworkBridgeConfig( - nInfo util.NetInfo, - nodeSubnets []*net.IPNet, - masqCTMark, pktMark uint, - v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - b.Lock() - defer b.Unlock() - - netName := nInfo.GetNetworkName() - patchPort := nInfo.GetNetworkScopedPatchPortName(b.bridgeName, b.nodeName) - - _, found := b.netConfig[netName] - if !found { - netConfig := &bridgeUDNConfiguration{ - patchPort: patchPort, - masqCTMark: fmt.Sprintf("0x%x", masqCTMark), - pktMark: fmt.Sprintf("0x%x", pktMark), - v4MasqIPs: v4MasqIPs, - v6MasqIPs: v6MasqIPs, - subnets: nInfo.Subnets(), - nodeSubnets: nodeSubnets, - } - netConfig.advertised.Store(util.IsPodNetworkAdvertisedAtNode(nInfo, b.nodeName)) - - b.netConfig[netName] = netConfig - } else { - klog.Warningf("Trying to update bridge config for network %s which already"+ - "exists in cache...networks are not mutable...ignoring update", nInfo.GetNetworkName()) - } - return nil -} - -// delNetworkBridgeConfig deletes the provided netInfo from the bridge configuration cache -func (b *bridgeConfiguration) delNetworkBridgeConfig(nInfo util.NetInfo) { - b.Lock() - defer b.Unlock() - - delete(b.netConfig, nInfo.GetNetworkName()) -} - -func (b *bridgeConfiguration) getNetworkBridgeConfig(networkName string) *bridgeUDNConfiguration { - b.Lock() - defer b.Unlock() - return b.netConfig[networkName] -} - -// getActiveNetworkBridgeConfigCopy returns a shallow copy of the network configuration corresponding to the -// provided netInfo. 
-// -// NOTE: if the network configuration can't be found or if the network is not patched by OVN -// yet this returns nil. -func (b *bridgeConfiguration) getActiveNetworkBridgeConfigCopy(networkName string) *bridgeUDNConfiguration { - b.Lock() - defer b.Unlock() - - if netConfig, found := b.netConfig[networkName]; found && netConfig.ofPortPatch != "" { - return netConfig.shallowCopy() - } - return nil -} - -func (b *bridgeConfiguration) patchedNetConfigs() []*bridgeUDNConfiguration { - result := make([]*bridgeUDNConfiguration, 0, len(b.netConfig)) - for _, netConfig := range b.netConfig { - if netConfig.ofPortPatch == "" { - continue - } - result = append(result, netConfig) - } - return result -} - -// END UDN UTILs for bridgeConfiguration - -// bridgeUDNConfiguration holds the patchport and ctMark -// information for a given network -type bridgeUDNConfiguration struct { - patchPort string - ofPortPatch string - masqCTMark string - pktMark string - v4MasqIPs *udn.MasqueradeIPs - v6MasqIPs *udn.MasqueradeIPs - subnets []config.CIDRNetworkEntry - nodeSubnets []*net.IPNet - advertised atomic.Bool -} - -func (netConfig *bridgeUDNConfiguration) shallowCopy() *bridgeUDNConfiguration { - copy := &bridgeUDNConfiguration{ - patchPort: netConfig.patchPort, - ofPortPatch: netConfig.ofPortPatch, - masqCTMark: netConfig.masqCTMark, - pktMark: netConfig.pktMark, - v4MasqIPs: netConfig.v4MasqIPs, - v6MasqIPs: netConfig.v6MasqIPs, - subnets: netConfig.subnets, - nodeSubnets: netConfig.nodeSubnets, - } - netConfig.advertised.Store(netConfig.advertised.Load()) - return copy -} - -func (netConfig *bridgeUDNConfiguration) isDefaultNetwork() bool { - return netConfig.masqCTMark == ctMarkOVN -} - -func (netConfig *bridgeUDNConfiguration) setBridgeNetworkOfPortsInternal() error { - ofportPatch, stderr, err := util.GetOVSOfPort("get", "Interface", netConfig.patchPort, "ofport") - if err != nil { - return fmt.Errorf("failed while waiting on patch port %q to be created by ovn-controller 
and "+ - "while getting ofport. stderr: %v, error: %v", netConfig.patchPort, stderr, err) - } - netConfig.ofPortPatch = ofportPatch - return nil -} - -func setBridgeNetworkOfPorts(bridge *bridgeConfiguration, netName string) error { - bridge.Lock() - defer bridge.Unlock() - - netConfig, found := bridge.netConfig[netName] - if !found { - return fmt.Errorf("failed to find network %s configuration on bridge %s", netName, bridge.bridgeName) - } - return netConfig.setBridgeNetworkOfPortsInternal() -} - func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeLister listers.NodeLister, kubeInterface kube.Interface, vrfManager *vrfmanager.Controller, ruleManager *iprulemanager.Controller, defaultNetworkGateway Gateway) (*UserDefinedNetworkGateway, error) { @@ -267,7 +124,7 @@ func NewUserDefinedNetworkGateway(netInfo util.NetInfo, node *corev1.Node, nodeL if gw.openflowManager == nil { return nil, fmt.Errorf("openflow manager has not been provided for network: %s", netInfo.GetNetworkName()) } - intfName := gw.openflowManager.defaultBridge.getGatewayIface() + intfName := gw.openflowManager.defaultBridge.GetGatewayIface() link, err := util.GetNetLinkOps().LinkByName(intfName) if err != nil { return nil, fmt.Errorf("unable to get link for %s, error: %v", intfName, err) @@ -305,7 +162,9 @@ func (udng *UserDefinedNetworkGateway) delMarkChain() error { chain := &knftables.Chain{ Name: GetUDNMarkChain(fmt.Sprintf("0x%x", udng.pktMark)), } - tx.Flush(chain) + // Delete would return an error if we tried to delete a chain that didn't exist, so + // we do an Add first (which is a no-op if the chain already exists) and then Delete. 
+ tx.Add(chain) tx.Delete(chain) return nft.Run(context.TODO(), tx) } @@ -397,12 +256,12 @@ func (udng *UserDefinedNetworkGateway) AddNetwork() error { waiter := newStartupWaiterWithTimeout(waitForPatchPortTimeout) readyFunc := func() (bool, error) { - if err := setBridgeNetworkOfPorts(udng.openflowManager.defaultBridge, udng.GetNetworkName()); err != nil { + if err := udng.openflowManager.defaultBridge.SetNetworkOfPatchPort(udng.GetNetworkName()); err != nil { klog.V(3).Infof("Failed to set network %s's openflow ports for default bridge; error: %v", udng.GetNetworkName(), err) return false, nil } if udng.openflowManager.externalGatewayBridge != nil { - if err := setBridgeNetworkOfPorts(udng.openflowManager.externalGatewayBridge, udng.GetNetworkName()); err != nil { + if err := udng.openflowManager.externalGatewayBridge.SetNetworkOfPatchPort(udng.GetNetworkName()); err != nil { klog.V(3).Infof("Failed to set network %s's openflow ports for secondary bridge; error: %v", udng.GetNetworkName(), err) return false, nil } @@ -520,8 +379,10 @@ func (udng *UserDefinedNetworkGateway) addUDNManagementPort() (netlink.Link, err // STEP3 // IPv6 forwarding is enabled globally if ipv4, _ := udng.IPMode(); ipv4 { - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.forwarding=1", interfaceName)) - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", interfaceName) { + // we use forward slash as path separator to allow dotted interfaceName e.g. 
foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/forwarding=1", interfaceName)) + // systctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.forwarding = 1", strings.ReplaceAll(interfaceName, ".", "/")) { return nil, fmt.Errorf("could not set the correct forwarding value for interface %s: stdout: %v, stderr: %v, err: %v", interfaceName, stdout, stderr, err) } @@ -740,7 +601,7 @@ func (udng *UserDefinedNetworkGateway) getDefaultRoute(isNetworkAdvertised bool) var retVal []netlink.Route var defaultAnyCIDR *net.IPNet - for _, nextHop := range udng.gateway.openflowManager.defaultBridge.nextHops { + for _, nextHop := range udng.gateway.nextHops { isV6 := utilnet.IsIPv6(nextHop) _, defaultAnyCIDR, _ = net.ParseCIDR("0.0.0.0/0") if isV6 { @@ -783,17 +644,20 @@ func (udng *UserDefinedNetworkGateway) getV6MasqueradeIP() (*net.IPNet, error) { // constructUDNVRFIPRules constructs rules that redirect matching packets // into the corresponding UDN VRF routing table. 
-// If the network is not advertised, an example of the rules we set for a -// network is: -// 2000: from all fwmark 0x1001 lookup 1007 -// 2000: from all to 169.254.0.12 lookup 1007 -// 2000: from all fwmark 0x1002 lookup 1009 -// 2000: from all to 169.254.0.14 lookup 1009 -// If the network is advertised, an example of the rules we set for a network is: +// If the network is not advertised, an example of the rules we set for two +// networks is: +// 2000: from all fwmark 0x1001 lookup 1007 +// 2000: from all to 169.254.0.12 lookup 1007 +// 2000: from all fwmark 0x1002 lookup 1009 +// 2000: from all to 169.254.0.14 lookup 1009 +// If the network is advertised, an example of the rules we set for two +// networks is: // 2000: from all fwmark 0x1001 lookup 1007 // 2000: from all to 10.132.0.0/14 lookup 1007 +// 2000: from all to 169.254.0.12 lookup 1007 // 2000: from all fwmark 0x1001 lookup 1009 // 2000: from all to 10.134.0.0/14 lookup 1009 +// 2000: from all to 169.254.0.14 lookup 1009 func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertised bool) ([]netlink.Rule, []netlink.Rule, error) { var addIPRules []netlink.Rule var delIPRules []netlink.Rule @@ -832,7 +696,7 @@ func (udng *UserDefinedNetworkGateway) constructUDNVRFIPRules(isNetworkAdvertise delIPRules = append(delIPRules, subnetIPRules...) default: addIPRules = append(addIPRules, subnetIPRules...) - delIPRules = append(delIPRules, masqIPRules...) + addIPRules = append(addIPRules, masqIPRules...) } return addIPRules, delIPRules, nil } @@ -879,8 +743,10 @@ func addRPFilterLooseModeForManagementPort(mgmtPortName string) error { rpFilterLooseMode := "2" // TODO: Convert testing framework to mock golang module utilities. 
Example: // result, err := sysctl.Sysctl(fmt.Sprintf("net/ipv4/conf/%s/rp_filter", types.K8sMgmtIntfName), rpFilterLooseMode) - stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net.ipv4.conf.%s.rp_filter=%s", mgmtPortName, rpFilterLooseMode)) - if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.rp_filter = %s", mgmtPortName, rpFilterLooseMode) { + // we use forward slash as path separator to allow dotted mgmtPortName e.g. foo.200 + stdout, stderr, err := util.RunSysctl("-w", fmt.Sprintf("net/ipv4/conf/%s/rp_filter=%s", mgmtPortName, rpFilterLooseMode)) + // systctl output enforces dot as path separator + if err != nil || stdout != fmt.Sprintf("net.ipv4.conf.%s.rp_filter = %s", strings.ReplaceAll(mgmtPortName, ".", "/"), rpFilterLooseMode) { return fmt.Errorf("could not set the correct rp_filter value for interface %s: stdout: %v, stderr: %v, err: %v", mgmtPortName, stdout, stderr, err) } @@ -930,11 +796,11 @@ func (udng *UserDefinedNetworkGateway) doReconcile() error { // update bridge configuration isNetworkAdvertised := util.IsPodNetworkAdvertisedAtNode(udng.NetInfo, udng.node.Name) - netConfig := udng.openflowManager.defaultBridge.getNetworkBridgeConfig(udng.GetNetworkName()) + netConfig := udng.openflowManager.defaultBridge.GetNetworkConfig(udng.GetNetworkName()) if netConfig == nil { return fmt.Errorf("missing bridge configuration for network %s", udng.GetNetworkName()) } - netConfig.advertised.Store(isNetworkAdvertised) + netConfig.Advertised.Store(isNetworkAdvertised) if err := udng.updateUDNVRFIPRules(isNetworkAdvertised); err != nil { return fmt.Errorf("error while updating ip rule for UDN %s: %s", udng.GetNetworkName(), err) diff --git a/go-controller/pkg/node/gateway_udn_test.go b/go-controller/pkg/node/gateway_udn_test.go index 34673210ca..bd05aacd57 100644 --- a/go-controller/pkg/node/gateway_udn_test.go +++ b/go-controller/pkg/node/gateway_udn_test.go @@ -21,7 +21,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" - utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" "sigs.k8s.io/knftables" @@ -32,6 +31,7 @@ import ( factoryMocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory/mocks" kubemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube/mocks" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/iprulemanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" @@ -59,14 +59,14 @@ func getCreationFakeCommands(fexec *ovntest.FakeExec, mgtPort, mgtPortMAC, netNa }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf." + mgtPort + ".forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/" + mgtPort + "/forwarding=1", Output: "net.ipv4.conf." + mgtPort + ".forwarding = 1", }) } func getRPFilterLooseModeFakeCommands(fexec *ovntest.FakeExec) { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.ovn-k8s-mp3.rp_filter=2", + Cmd: "sysctl -w net/ipv4/conf/ovn-k8s-mp3/rp_filter=2", Output: "net.ipv4.conf.ovn-k8s-mp3.rp_filter = 2", }) } @@ -148,7 +148,7 @@ func setUpGatewayFakeOVSCommands(fexec *ovntest.FakeExec) { }) if config.IPv4Mode { fexec.AddFakeCmd(&ovntest.ExpectedCmd{ - Cmd: "sysctl -w net.ipv4.conf.breth0.forwarding=1", + Cmd: "sysctl -w net/ipv4/conf/breth0/forwarding=1", Output: "net.ipv4.conf.breth0.forwarding = 1", }) } @@ -171,6 +171,9 @@ func setUpGatewayFakeOVSCommands(fexec *ovntest.FakeExec) { Cmd: "ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . 
other_config:hw-offload", Output: "false", }) + fexec.AddFakeCmdsNoOutputNoError([]string{ + "ovs-appctl --timeout=15 fdb/add breth0 breth0 0 00:00:00:55:66:99", + }) fexec.AddFakeCmd(&ovntest.ExpectedCmd{ Cmd: "ovs-vsctl --timeout=15 get Interface patch-breth0_worker1-to-br-int ofport", Output: "5", @@ -234,115 +237,13 @@ func openflowManagerCheckPorts(ofMgr *openflowManager) { GinkgoHelper() netConfigs, uplink, ofPortPhys := ofMgr.getDefaultBridgePortConfigurations() sort.SliceStable(netConfigs, func(i, j int) bool { - return netConfigs[i].patchPort < netConfigs[j].patchPort + return netConfigs[i].PatchPort < netConfigs[j].PatchPort }) Expect(checkPorts(netConfigs, uplink, ofPortPhys)).To(Succeed()) } -func checkDefaultSvcIsolationOVSFlows(flows []string, defaultConfig *bridgeUDNConfiguration, ofPortHost, bridgeMAC string, svcCIDR *net.IPNet) { - By(fmt.Sprintf("Checking default service isolation flows for %s", svcCIDR.String())) - - var masqIP string - var masqSubnet string - var protoPrefix string - if utilnet.IsIPv4CIDR(svcCIDR) { - protoPrefix = "ip" - masqIP = config.Gateway.MasqueradeIPs.V4HostMasqueradeIP.String() - masqSubnet = config.Gateway.V4MasqueradeSubnet - } else { - protoPrefix = "ip6" - masqIP = config.Gateway.MasqueradeIPs.V6HostMasqueradeIP.String() - masqSubnet = config.Gateway.V6MasqueradeSubnet - } - - var nTable0DefaultFlows int - var nTable0UDNMasqFlows int - var nTable2Flows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=500, in_port=%s, %s, %s_dst=%s, actions=ct(commit,zone=%d,nat(src=%s),table=2)", - ofPortHost, protoPrefix, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone, - masqIP)) { - nTable0DefaultFlows++ - } else if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=%s, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=%d,table=2)", - ofPortHost, protoPrefix, protoPrefix, masqSubnet, protoPrefix, svcCIDR, config.Default.HostMasqConntrackZone)) { - nTable0UDNMasqFlows++ - } else 
if strings.Contains(flow, fmt.Sprintf("priority=100, table=2, actions=set_field:%s->eth_dst,output:%s", - bridgeMAC, defaultConfig.ofPortPatch)) { - nTable2Flows++ - } - } - - Expect(nTable0DefaultFlows).To(Equal(1)) - Expect(nTable0UDNMasqFlows).To(Equal(1)) - Expect(nTable2Flows).To(Equal(1)) -} - -func checkAdvertisedUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName, bridgeMAC string, svcCIDR *net.IPNet, expectedNFlows int) { - By(fmt.Sprintf("Checking advertsised UDN %s service isolation flows for %s; expected %d flows", - netName, svcCIDR.String(), expectedNFlows)) - - var matchingIPFamilySubnet *net.IPNet - var protoPrefix string - var udnAdvertisedSubnets []*net.IPNet - var err error - for _, clusterEntry := range netConfig.subnets { - udnAdvertisedSubnets = append(udnAdvertisedSubnets, clusterEntry.CIDR) - } - if utilnet.IsIPv4CIDR(svcCIDR) { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip" - } else { - matchingIPFamilySubnet, err = util.MatchFirstIPNetFamily(false, udnAdvertisedSubnets) - Expect(err).ToNot(HaveOccurred()) - protoPrefix = "ip6" - } - - var nFlows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=set_field:%s->eth_dst,output:%s", - protoPrefix, protoPrefix, matchingIPFamilySubnet, bridgeMAC, netConfig.ofPortPatch)) { - nFlows++ - } - if strings.Contains(flow, fmt.Sprintf("priority=550, in_port=LOCAL, %s, %s_src=%s, %s_dst=%s, actions=ct(commit,zone=64001,table=2)", - protoPrefix, protoPrefix, matchingIPFamilySubnet, protoPrefix, svcCIDR)) { - nFlows++ - } - } - - Expect(nFlows).To(Equal(expectedNFlows)) -} - -func checkUDNSvcIsolationOVSFlows(flows []string, netConfig *bridgeUDNConfiguration, netName, bridgeMAC string, svcCIDR *net.IPNet, expectedNFlows int) { - By(fmt.Sprintf("Checking UDN %s service isolation flows for %s; expected %d flows", 
- netName, svcCIDR.String(), expectedNFlows)) - - var mgmtMasqIP string - var protoPrefix string - if utilnet.IsIPv4CIDR(svcCIDR) { - mgmtMasqIP = netConfig.v4MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip" - } else { - mgmtMasqIP = netConfig.v6MasqIPs.ManagementPort.IP.String() - protoPrefix = "ip6" - } - - var nFlows int - for _, flow := range flows { - if strings.Contains(flow, fmt.Sprintf("priority=200, table=2, %s, %s_src=%s, actions=set_field:%s->eth_dst,output:%s", - protoPrefix, protoPrefix, mgmtMasqIP, bridgeMAC, netConfig.ofPortPatch)) { - nFlows++ - } - } - - Expect(nFlows).To(Equal(expectedNFlows)) -} - func getDummyOpenflowManager() *openflowManager { - gwBridge := &bridgeConfiguration{ - gwIface: "", - bridgeName: "breth0", - } + gwBridge := bridgeconfig.TestBridgeConfig("breth0") ofm := &openflowManager{ defaultBridge: gwBridge, } @@ -459,7 +360,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { mgtPortMAC = util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() @@ -502,7 +403,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(err).NotTo(HaveOccurred()) getDeletionFakeOVSCommands(fexec, mgtPort) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) cnode := node.DeepCopy() kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation err = testNS.Do(func(ns.NetNS) error { @@ -538,7 +439,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { mgtPortMAC = 
util.IPAddrToHWAddr(util.GetNodeManagementIfAddr(ipNet).IP).String() getCreationFakeCommands(fexec, mgtPort, mgtPortMAC, netName, nodeName, netInfo.MTU()) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() @@ -580,7 +481,7 @@ var _ = Describe("UserDefinedNetworkGateway", func() { Expect(err).NotTo(HaveOccurred()) getDeletionFakeOVSCommands(fexec, mgtPort) nodeLister.On("Get", mock.AnythingOfType("string")).Return(node, nil) - factoryMock.On("GetNode", "worker1").Return(node, nil) + factoryMock.On("GetNodeForWindows", "worker1").Return(node, nil) cnode := node.DeepCopy() kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation err = testNS.Do(func(ns.NetNS) error { @@ -766,22 +667,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost + Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + 
Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") + bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() + ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { udnFlows++ } } @@ -791,10 +692,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for table 2 for service isolation. 
- checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 1) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -806,8 +707,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -821,10 +722,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 for service isolation. 
- checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 0) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -997,22 +898,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost + Expect(flowMap["DEFAULT"]).To(HaveLen(64)) // 18 UDN Flows are added by default + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") + bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() + ofPortHost := udnGateway.openflowManager.defaultBridge.GetOfPortHost() for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", 
bridgeUdnConfig.OfPortPatch)) { udnFlows++ } } @@ -1022,10 +923,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect exactly one flow per UDN for tables 0 and 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 1) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 1) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1037,8 +938,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1052,10 +953,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. 
- checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for tables 0 and 2 for service isolation. - checkUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 0) + bridgeconfig.CheckUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1238,22 +1139,22 @@ var _ = Describe("UserDefinedNetworkGateway", func() { } } Expect(udnFlows).To(Equal(0)) - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // only default network + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // only default network Expect(udnGateway.AddNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(69)) // 18 UDN Flows and 5 advertisedUDN flows are added by default - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(2)) // default network + UDN network - defaultUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["default"] - bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.netConfig["bluenet"] - bridgeMAC := udnGateway.openflowManager.defaultBridge.macAddress.String() - ofPortHost := udnGateway.openflowManager.defaultBridge.ofPortHost + Expect(flowMap["DEFAULT"]).To(HaveLen(71)) // 18 UDN Flows, 5 advertisedUDN flows, and 2 packet mark flows (IPv4+IPv6) are added by default + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(2)) // default network + UDN network + defaultUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("default") + bridgeUdnConfig := udnGateway.openflowManager.defaultBridge.GetNetworkConfig("bluenet") + bridgeMAC := udnGateway.openflowManager.defaultBridge.GetMAC().String() + ofPortHost := 
udnGateway.openflowManager.defaultBridge.GetOfPortHost() for _, flows := range flowMap { for _, flow := range flows { if strings.Contains(flow, fmt.Sprintf("0x%x", udnGateway.masqCTMark)) { // UDN Flow udnFlows++ - } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.ofPortPatch)) { + } else if strings.Contains(flow, fmt.Sprintf("in_port=%s", bridgeUdnConfig.OfPortPatch)) { udnFlows++ } } @@ -1263,10 +1164,12 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) - // Expect exactly one flow per advertised UDN for table 2 and table 0 for service isolation. - checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 2) + // Expect exactly two flows per advertised UDN for table 2 and table 0 for service isolation, + // but one of the flows used by advertised UDNs is already tracked and used by default UDNs hence not + // counted here but in the check above for default svc isolation flows. 
+ bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 2) } // The second call to checkPorts() will return no ofPort for the UDN - simulating a deletion that already was @@ -1278,8 +1181,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { kubeMock.On("UpdateNodeStatus", cnode).Return(nil) // check if network key gets deleted from annotation Expect(udnGateway.DelNetwork()).To(Succeed()) flowMap = udnGateway.gateway.openflowManager.flowCache - Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present - Expect(udnGateway.openflowManager.defaultBridge.netConfig).To(HaveLen(1)) // default network only + Expect(flowMap["DEFAULT"]).To(HaveLen(46)) // only default network flows are present + Expect(udnGateway.openflowManager.defaultBridge.GetNetConfigLen()).To(Equal(1)) // default network only udnFlows = 0 for _, flows := range flowMap { for _, flow := range flows { @@ -1293,10 +1196,10 @@ var _ = Describe("UserDefinedNetworkGateway", func() { for _, svcCIDR := range config.Kubernetes.ServiceCIDRs { // Check flows for default network service CIDR. - checkDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) + bridgeconfig.CheckDefaultSvcIsolationOVSFlows(flowMap["DEFAULT"], defaultUdnConfig, ofPortHost, bridgeMAC, svcCIDR) // Expect no more flows per UDN for table 2 and table0 for service isolation. 
- checkAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", bridgeMAC, svcCIDR, 0) + bridgeconfig.CheckAdvertisedUDNSvcIsolationOVSFlows(flowMap["DEFAULT"], bridgeUdnConfig, "bluenet", svcCIDR, 0) } return nil }) @@ -1479,8 +1382,8 @@ var _ = Describe("UserDefinedNetworkGateway", func() { err = testNS.Do(func(ns.NetNS) error { defer GinkgoRecover() ofm := getDummyOpenflowManager() - ofm.defaultBridge.nextHops = ovntest.MustParseIPs(config.Gateway.NextHop) - udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, &gateway{openflowManager: ofm}) + udnGateway, err := NewUserDefinedNetworkGateway(mutableNetInfo, node, nil, nil, vrf, nil, + &gateway{openflowManager: ofm, nextHops: ovntest.MustParseIPs(config.Gateway.NextHop)}) Expect(err).NotTo(HaveOccurred()) mplink, err := netlink.LinkByName(mgtPort) Expect(err).NotTo(HaveOccurred()) @@ -1724,7 +1627,6 @@ func TestConstructUDNVRFIPRules(t *testing.T) { cidr := "" if config.IPv4Mode { cidr = "100.128.0.0/16/24" - } if config.IPv4Mode && config.IPv6Mode { cidr += ",ae70::/60/64" @@ -1810,8 +1712,6 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { table: 1007, dst: *ovntest.MustParseIPNet("100.128.0.0/16"), }, - }, - deleteRules: []testRule{ { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V4, @@ -1837,8 +1737,6 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { table: 1009, dst: *ovntest.MustParseIPNet("ae70::/60"), }, - }, - deleteRules: []testRule{ { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V6, @@ -1876,8 +1774,6 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { table: 1010, dst: *ovntest.MustParseIPNet("ae70::/60"), }, - }, - deleteRules: []testRule{ { priority: UDNMasqueradeIPRulePriority, family: netlink.FAMILY_V4, @@ -1912,9 +1808,9 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { cidr = "100.128.0.0/16/24" } if config.IPv4Mode && 
config.IPv6Mode { - cidr += ",ae70::/60" + cidr += ",ae70::/60/64" } else if config.IPv6Mode { - cidr = "ae70::/60" + cidr = "ae70::/60/64" } nad := ovntest.GenerateNAD("bluenet", "rednad", "greenamespace", types.Layer3Topology, cidr, types.NetworkRolePrimary) @@ -1943,6 +1839,8 @@ func TestConstructUDNVRFIPRulesPodNetworkAdvertised(t *testing.T) { udnGateway.vrfTableId = test.vrftableID rules, delRules, err := udnGateway.constructUDNVRFIPRules(true) g.Expect(err).ToNot(HaveOccurred()) + g.Expect(rules).To(HaveLen(len(test.expectedRules))) + g.Expect(delRules).To(HaveLen(len(test.deleteRules))) for i, rule := range rules { g.Expect(rule.Priority).To(Equal(test.expectedRules[i].priority)) g.Expect(rule.Table).To(Equal(test.expectedRules[i].table)) diff --git a/go-controller/pkg/node/helper_linux.go b/go-controller/pkg/node/helper_linux.go index 5e55173a4a..8b46f05315 100644 --- a/go-controller/pkg/node/helper_linux.go +++ b/go-controller/pkg/node/helper_linux.go @@ -153,23 +153,6 @@ func getDefaultGatewayInterfaceByFamily(family int, gwIface string) (string, net return "", net.IP{}, nil } -func getIntfName(gatewayIntf string) (string, error) { - // The given (or autodetected) interface is an OVS bridge and this could be - // created by us using util.NicToBridge() or it was pre-created by the user. - - // Is intfName a port of gatewayIntf? - intfName, err := util.GetNicName(gatewayIntf) - if err != nil { - return "", err - } - _, stderr, err := util.RunOVSVsctl("get", "interface", intfName, "ofport") - if err != nil { - return "", fmt.Errorf("failed to get ofport of %s, stderr: %q, error: %v", - intfName, stderr, err) - } - return intfName, nil -} - // filterRoutesByIfIndex is a helper function that will sieve the provided routes and check // if they match the provided index. This used to be implemented with netlink.RT_FILTER_OIF, // however the problem is that this filtered out MultiPath IPv6 routes which have a LinkIndex of 0. 
diff --git a/go-controller/pkg/node/managementport/port_linux.go b/go-controller/pkg/node/managementport/port_linux.go index 378e09b238..f6ea8676dd 100644 --- a/go-controller/pkg/node/managementport/port_linux.go +++ b/go-controller/pkg/node/managementport/port_linux.go @@ -101,7 +101,7 @@ func NewManagementPortController( } // setup NFT sets early as gateway initialization depends on it - err = setupManagementPortNFTSets() + err = SetupManagementPortNFTSets() if err != nil { return nil, err } @@ -299,7 +299,7 @@ func setupManagementPortConfig(link netlink.Link, cfg *managementPortConfig, rou // setupManagementPortNFTSets sets up the NFT sets that the management port SNAR // rules rely on. These sets are written to by other componets so they are setup // independantly and as early as possible. -func setupManagementPortNFTSets() error { +func SetupManagementPortNFTSets() error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return err @@ -321,6 +321,18 @@ func setupManagementPortNFTSets() error { Comment: knftables.PtrTo("eTP:Local short-circuit not subject to management port SNAT (IPv6)"), Type: "ipv6_addr . inet_proto . 
inet_service", }) + tx.Add(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV4, + Comment: knftables.PtrTo("subnets not subject to management port SNAT (IPv4)"), + Type: "ipv4_addr", + Flags: []knftables.SetFlag{knftables.IntervalFlag}, + }) + tx.Add(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV6, + Comment: knftables.PtrTo("subnets not subject to management port SNAT (IPv6)"), + Type: "ipv6_addr", + Flags: []knftables.SetFlag{knftables.IntervalFlag}, + }) err = nft.Run(context.TODO(), tx) if err != nil { @@ -402,6 +414,14 @@ func setupManagementPortNFTChain(interfaceName string, cfg *managementPortConfig "return", ), }) + tx.Add(&knftables.Rule{ + Chain: nftMgmtPortChain, + Rule: knftables.Concat( + "ip saddr", "@", types.NFTMgmtPortNoSNATSubnetsV4, + counterIfDebug, + "return", + ), + }) tx.Add(&knftables.Rule{ Chain: nftMgmtPortChain, Rule: knftables.Concat( @@ -441,6 +461,14 @@ func setupManagementPortNFTChain(interfaceName string, cfg *managementPortConfig "return", ), }) + tx.Add(&knftables.Rule{ + Chain: nftMgmtPortChain, + Rule: knftables.Concat( + "ip6 saddr", "@", types.NFTMgmtPortNoSNATSubnetsV6, + counterIfDebug, + "return", + ), + }) tx.Add(&knftables.Rule{ Chain: nftMgmtPortChain, Rule: knftables.Concat( @@ -457,6 +485,57 @@ func setupManagementPortNFTChain(interfaceName string, cfg *managementPortConfig return nil } +func UpdateNoSNATSubnetsSets(node *corev1.Node, getSubnetsFn func(*corev1.Node) ([]string, error)) error { + subnetsList, err := getSubnetsFn(node) + if err != nil { + return fmt.Errorf("error retrieving subnets list: %w", err) + } + + subNetV4 := make([]*knftables.Element, 0) + subNetV6 := make([]*knftables.Element, 0) + + for _, subnet := range subnetsList { + if utilnet.IPFamilyOfCIDRString(subnet) == utilnet.IPv4 { + subNetV4 = append(subNetV4, + &knftables.Element{ + Set: types.NFTMgmtPortNoSNATSubnetsV4, + Key: []string{subnet}, + }, + ) + } + if utilnet.IPFamilyOfCIDRString(subnet) == utilnet.IPv6 { + subNetV6 = 
append(subNetV6, + &knftables.Element{ + Set: types.NFTMgmtPortNoSNATSubnetsV6, + Key: []string{subnet}, + }, + ) + } + + } + nft, err := nodenft.GetNFTablesHelper() + if err != nil { + return fmt.Errorf("failed to get nftables: %v", err) + } + + tx := nft.NewTransaction() + tx.Flush(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV4, + }) + tx.Flush(&knftables.Set{ + Name: types.NFTMgmtPortNoSNATSubnetsV6, + }) + + for _, elem := range subNetV4 { + tx.Add(elem) + } + for _, elem := range subNetV6 { + tx.Add(elem) + } + + return nft.Run(context.TODO(), tx) +} + // createPlatformManagementPort creates a management port attached to the node switch // that lets the node access its pods via their private IP address. This is used // for health checking and other management tasks. diff --git a/go-controller/pkg/node/managementport/port_linux_test.go b/go-controller/pkg/node/managementport/port_linux_test.go index d6d99d7577..13d7641c11 100644 --- a/go-controller/pkg/node/managementport/port_linux_test.go +++ b/go-controller/pkg/node/managementport/port_linux_test.go @@ -783,7 +783,7 @@ var _ = Describe("Management Port tests", func() { ipv4: &fakeMgmtPortIPFamilyConfig, netInfo: netInfo, } - err := setupManagementPortNFTSets() + err := SetupManagementPortNFTSets() Expect(err).NotTo(HaveOccurred()) err = setupManagementPortNFTChain(types.K8sMgmtIntfName, &fakeMgmtPortConfig) Expect(err).NotTo(HaveOccurred()) diff --git a/go-controller/pkg/node/node_ip_handler_linux.go b/go-controller/pkg/node/node_ip_handler_linux.go index a0c5ab21e8..dcbbbfc7d6 100644 --- a/go-controller/pkg/node/node_ip_handler_linux.go +++ b/go-controller/pkg/node/node_ip_handler_linux.go @@ -20,6 +20,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -37,21 +38,21 @@ type addressManager struct { syncPeriod time.Duration // compare node primary IP change nodePrimaryAddr net.IP - gatewayBridge *bridgeConfiguration + gatewayBridge *bridgeconfig.BridgeConfiguration OnChanged func() sync.Mutex } // initializes a new address manager which will hold all the IPs on a node -func newAddressManager(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeConfiguration) *addressManager { +func newAddressManager(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeconfig.BridgeConfiguration) *addressManager { return newAddressManagerInternal(nodeName, k, mgmtPort, watchFactory, gwBridge, true) } // newAddressManagerInternal creates a new address manager; this function is // only expose for testcases to disable netlink subscription to ensure // reproducibility of unit tests. 
-func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeConfiguration, useNetlink bool) *addressManager { +func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort managementport.Interface, watchFactory factory.NodeWatchFactory, gwBridge *bridgeconfig.BridgeConfiguration, useNetlink bool) *addressManager { mgr := &addressManager{ nodeName: nodeName, watchFactory: watchFactory, @@ -64,27 +65,11 @@ func newAddressManagerInternal(nodeName string, k kube.Interface, mgmtPort manag } mgr.nodeAnnotator = kube.NewNodeAnnotator(k, nodeName) if config.OvnKubeNode.Mode == types.NodeModeDPU { - var ifAddrs []*net.IPNet - - // update k8s.ovn.org/host-cidrs - node, err := watchFactory.GetNode(nodeName) - if err != nil { - klog.Errorf("Failed to get node %s: %v", nodeName, err) - return nil - } - if useNetlink { - // get updated interface IP addresses for the gateway bridge - ifAddrs, err = gwBridge.updateInterfaceIPAddresses(node) - if err != nil { - klog.Errorf("Failed to obtain interface IP addresses for node %s: %v", nodeName, err) - return nil - } - } - if err = mgr.updateHostCIDRs(ifAddrs); err != nil { + if err := mgr.updateHostCIDRs(); err != nil { klog.Errorf("Failed to update host-cidrs annotations on node %s: %v", nodeName, err) return nil } - if err = mgr.nodeAnnotator.Run(); err != nil { + if err := mgr.nodeAnnotator.Run(); err != nil { klog.Errorf("Failed to set host-cidrs annotations on node %s: %v", nodeName, err) return nil } @@ -278,14 +263,14 @@ func (c *addressManager) updateNodeAddressAnnotations() error { if c.useNetlink { // get updated interface IP addresses for the gateway bridge - ifAddrs, err = c.gatewayBridge.updateInterfaceIPAddresses(node) + ifAddrs, err = c.gatewayBridge.UpdateInterfaceIPAddresses(node) if err != nil { return err } } // update k8s.ovn.org/host-cidrs - if err = c.updateHostCIDRs(ifAddrs); err != nil { + if err = 
c.updateHostCIDRs(); err != nil { return err } @@ -315,14 +300,10 @@ func (c *addressManager) updateNodeAddressAnnotations() error { return nil } -func (c *addressManager) updateHostCIDRs(ifAddrs []*net.IPNet) error { +func (c *addressManager) updateHostCIDRs() error { if config.OvnKubeNode.Mode == types.NodeModeDPU { - // For DPU mode, here we need to use the DPU host's IP address which is the tenant cluster's - // host internal IP address instead. - // Currently we are only intentionally supporting IPv4 for DPU here. - nodeIPNetv4, _ := util.MatchFirstIPNetFamily(false, ifAddrs) - nodeAddrSet := sets.New[string](nodeIPNetv4.String()) - return util.SetNodeHostCIDRs(c.nodeAnnotator, nodeAddrSet) + // For DPU mode, we don't need to update the host-cidrs annotation. + return nil } return util.SetNodeHostCIDRs(c.nodeAnnotator, c.cidrs) @@ -437,7 +418,8 @@ func (c *addressManager) isValidNodeIP(addr net.IP, linkIndex int) bool { if util.IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnableInterconnect && config.Gateway.Mode != config.GatewayModeDisabled { // Two methods to lookup EIPs assigned to the gateway bridge. Fast path from a shared cache or slow path from node annotations. 
// At startup, gateway bridge cache gets sync - if c.gatewayBridge.eipMarkIPs != nil && c.gatewayBridge.eipMarkIPs.HasSyncdOnce() && c.gatewayBridge.eipMarkIPs.IsIPPresent(addr) { + eipMarkIPs := c.gatewayBridge.GetEIPMarkIPs() + if eipMarkIPs != nil && eipMarkIPs.HasSyncdOnce() && eipMarkIPs.IsIPPresent(addr) { return false } else { if eipAddresses, err := c.getPrimaryHostEgressIPs(); err != nil { diff --git a/go-controller/pkg/node/node_ip_handler_linux_test.go b/go-controller/pkg/node/node_ip_handler_linux_test.go index d8ff6710d9..aa819cdb8a 100644 --- a/go-controller/pkg/node/node_ip_handler_linux_test.go +++ b/go-controller/pkg/node/node_ip_handler_linux_test.go @@ -21,6 +21,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" nodenft "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/nftables" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" nodemocks "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/mocks/github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" @@ -401,7 +402,7 @@ func configureKubeOVNContext(nodeName string, useNetlink bool) *testCtx { mpmock := &nodemocks.ManagementPort{} mpmock.On("GetAddresses").Return([]*net.IPNet{tc.mgmtPortIP4, tc.mgmtPortIP6}) - fakeBridgeConfiguration := &bridgeConfiguration{bridgeName: "breth0"} + fakeBridgeConfiguration := bridgeconfig.TestBridgeConfig("breth0") k := &kube.Kube{KClient: tc.fakeClient} tc.ipManager = newAddressManagerInternal(nodeName, k, mpmock, tc.watchFactory, fakeBridgeConfiguration, useNetlink) diff --git a/go-controller/pkg/node/node_nftables.go b/go-controller/pkg/node/node_nftables.go index e52a8970a4..ca4afc9ac2 100644 --- a/go-controller/pkg/node/node_nftables.go +++ b/go-controller/pkg/node/node_nftables.go @@ -13,8 +13,8 @@ import ( 
const nftPMTUDChain = "no-pmtud" -// setupPMTUDNFTSets sets up the NFT sets that contain remote Kubernetes node IPs -func setupPMTUDNFTSets() error { +// setupRemoteNodeNFTSets sets up the NFT sets that contain remote Kubernetes node IPs +func setupRemoteNodeNFTSets() error { nft, err := nodenft.GetNFTablesHelper() if err != nil { return fmt.Errorf("failed to get nftables helper: %w", err) @@ -22,12 +22,12 @@ func setupPMTUDNFTSets() error { tx := nft.NewTransaction() tx.Add(&knftables.Set{ - Name: types.NFTNoPMTUDRemoteNodeIPsv4, + Name: types.NFTRemoteNodeIPsv4, Comment: knftables.PtrTo("Block egress ICMP needs frag to remote Kubernetes nodes"), Type: "ipv4_addr", }) tx.Add(&knftables.Set{ - Name: types.NFTNoPMTUDRemoteNodeIPsv6, + Name: types.NFTRemoteNodeIPsv6, Comment: knftables.PtrTo("Block egress ICMPv6 packet too big to remote Kubernetes nodes"), Type: "ipv6_addr", }) @@ -68,7 +68,7 @@ func setupPMTUDNFTChain() error { tx.Add(&knftables.Rule{ Chain: nftPMTUDChain, Rule: knftables.Concat( - "ip daddr @"+types.NFTNoPMTUDRemoteNodeIPsv4, + "ip daddr @"+types.NFTRemoteNodeIPsv4, "meta l4proto icmp", "icmp type 3", // type 3 == Destination Unreachable "icmp code 4", // code 4 indicates fragmentation needed @@ -85,7 +85,7 @@ func setupPMTUDNFTChain() error { "meta l4proto icmpv6", // match on ICMPv6 packets "icmpv6 type 2", // type 2 == Packet Too Big (PMTUD) "icmpv6 code 0", // code 0 for that message - "ip6 daddr @"+types.NFTNoPMTUDRemoteNodeIPsv6, + "ip6 daddr @"+types.NFTRemoteNodeIPsv6, counterIfDebug, "drop", // drop the packet ), diff --git a/go-controller/pkg/node/obj_retry_node.go b/go-controller/pkg/node/obj_retry_node.go index 148bb3cc40..646cca2ac3 100644 --- a/go-controller/pkg/node/obj_retry_node.go +++ b/go-controller/pkg/node/obj_retry_node.go @@ -11,6 +11,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" + 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/managementport" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -121,7 +122,7 @@ func (h *nodeEventHandler) AreResourcesEqual(obj1, obj2 interface{}) (bool, erro if !ok { return false, fmt.Errorf("could not cast obj2 of type %T to *kapi.Node", obj2) } - return reflect.DeepEqual(node1.Status.Addresses, node2.Status.Addresses), nil + return reflect.DeepEqual(node1.Status.Addresses, node2.Status.Addresses) && reflect.DeepEqual(node1.Annotations, node2.Annotations), nil default: return false, fmt.Errorf("no object comparison for type %s", h.objType) @@ -175,6 +176,13 @@ func (h *nodeEventHandler) AddResource(obj interface{}, _ bool) error { node := obj.(*corev1.Node) // if it's our node that is changing, then nothing to do as we dont add our own IP to the nftables rules if node.Name == h.nc.name { + if util.NodeDontSNATSubnetAnnotationExist(node) { + err := managementport.UpdateNoSNATSubnetsSets(node, util.ParseNodeDontSNATSubnetsList) + if err != nil { + return fmt.Errorf("error updating no snat subnets sets: %w", err) + } + } + return nil } return h.nc.addOrUpdateNode(node) @@ -218,37 +226,55 @@ func (h *nodeEventHandler) UpdateResource(oldObj, newObj interface{}, _ bool) er // if it's our node that is changing, then nothing to do as we dont add our own IP to the nftables rules if newNode.Name == h.nc.name { + + // if node's dont SNAT subnet annotation changed sync nftables + if !reflect.DeepEqual(oldNode.Annotations, newNode.Annotations) && + util.NodeDontSNATSubnetAnnotationChanged(oldNode, newNode) { + err := managementport.UpdateNoSNATSubnetsSets(newNode, util.ParseNodeDontSNATSubnetsList) + if err != nil { + return fmt.Errorf("error updating no snat subnets sets: %w", err) + } + } return nil } - // remote node that is changing - ipsToKeep := map[string]bool{} - for _, 
address := range newNode.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue + if util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { + // remote node that is changing + // Use GetNodeAddresses to get new node IPs + newIPsv4, newIPsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, newNode) + if err != nil { + return fmt.Errorf("failed to get addresses for new node %q: %w", newNode.Name, err) } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue + + ipsToKeep := map[string]bool{} + for _, nodeIP := range newIPsv4 { + ipsToKeep[nodeIP.String()] = true } - ipsToKeep[nodeIP.String()] = true - } - ipsToRemove := make([]net.IP, 0) - for _, address := range oldNode.Status.Addresses { - if address.Type != corev1.NodeInternalIP { - continue + for _, nodeIP := range newIPsv6 { + ipsToKeep[nodeIP.String()] = true } - nodeIP := net.ParseIP(address.Address) - if nodeIP == nil { - continue + + // Use GetNodeAddresses to get old node IPs + oldIPsv4, oldIPsv6, err := util.GetNodeAddresses(config.IPv4Mode, config.IPv6Mode, oldNode) + if err != nil { + return fmt.Errorf("failed to get addresses for old node %q: %w", oldNode.Name, err) } - if _, exists := ipsToKeep[nodeIP.String()]; !exists { - ipsToRemove = append(ipsToRemove, nodeIP) + + ipsToRemove := make([]net.IP, 0) + for _, nodeIP := range oldIPsv4 { + if _, exists := ipsToKeep[nodeIP.String()]; !exists { + ipsToRemove = append(ipsToRemove, nodeIP) + } + } + for _, nodeIP := range oldIPsv6 { + if _, exists := ipsToKeep[nodeIP.String()]; !exists { + ipsToRemove = append(ipsToRemove, nodeIP) + } } - } - if err := removePMTUDNodeNFTRules(ipsToRemove); err != nil { - return fmt.Errorf("error removing node %q stale NFT rules during update: %w", oldNode.Name, err) + if err := removePMTUDNodeNFTRules(ipsToRemove); err != nil { + return fmt.Errorf("error removing node %q stale NFT rules during update: %w", oldNode.Name, err) + } } return h.nc.addOrUpdateNode(newNode) @@ 
-273,6 +299,10 @@ func (h *nodeEventHandler) DeleteResource(obj, _ interface{}) error { case factory.NodeType: h.nc.deleteNode(obj.(*corev1.Node)) + _ = managementport.UpdateNoSNATSubnetsSets(obj.(*corev1.Node), func(_ *corev1.Node) ([]string, error) { + return []string{}, nil + }) + return nil default: diff --git a/go-controller/pkg/node/openflow_manager.go b/go-controller/pkg/node/openflow_manager.go index 5fa7d77865..de3a721519 100644 --- a/go-controller/pkg/node/openflow_manager.go +++ b/go-controller/pkg/node/openflow_manager.go @@ -13,13 +13,15 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/udn" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/bridgeconfig" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) type openflowManager struct { - defaultBridge *bridgeConfiguration - externalGatewayBridge *bridgeConfiguration + defaultBridge *bridgeconfig.BridgeConfiguration + externalGatewayBridge *bridgeconfig.BridgeConfiguration // flow cache, use map instead of array for readability when debugging flowCache map[string][]string flowMutex sync.Mutex @@ -31,20 +33,20 @@ type openflowManager struct { // UTILs Needed for UDN (also leveraged for default netInfo) in openflowmanager -func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { - return c.defaultBridge.getBridgePortConfigurations() +func (c *openflowManager) getDefaultBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { + return c.defaultBridge.GetPortConfigurations() } -func (c *openflowManager) getExGwBridgePortConfigurations() ([]*bridgeUDNConfiguration, string, string) { - return c.externalGatewayBridge.getBridgePortConfigurations() +func (c *openflowManager) 
getExGwBridgePortConfigurations() ([]*bridgeconfig.BridgeUDNConfiguration, string, string) { + return c.externalGatewayBridge.GetPortConfigurations() } func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNet, masqCTMark, pktMark uint, v6MasqIPs, v4MasqIPs *udn.MasqueradeIPs) error { - if err := c.defaultBridge.addNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.defaultBridge.AddNetworkConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } if c.externalGatewayBridge != nil { - if err := c.externalGatewayBridge.addNetworkBridgeConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { + if err := c.externalGatewayBridge.AddNetworkConfig(nInfo, nodeSubnets, masqCTMark, pktMark, v6MasqIPs, v4MasqIPs); err != nil { return err } } @@ -52,34 +54,28 @@ func (c *openflowManager) addNetwork(nInfo util.NetInfo, nodeSubnets []*net.IPNe } func (c *openflowManager) delNetwork(nInfo util.NetInfo) { - c.defaultBridge.delNetworkBridgeConfig(nInfo) + c.defaultBridge.DelNetworkConfig(nInfo) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.delNetworkBridgeConfig(nInfo) + c.externalGatewayBridge.DelNetworkConfig(nInfo) } } -func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeUDNConfiguration { - return c.defaultBridge.getActiveNetworkBridgeConfigCopy(nInfo.GetNetworkName()) +func (c *openflowManager) getActiveNetwork(nInfo util.NetInfo) *bridgeconfig.BridgeUDNConfiguration { + return c.defaultBridge.GetActiveNetworkBridgeConfigCopy(nInfo.GetNetworkName()) } // END UDN UTILs func (c *openflowManager) getDefaultBridgeName() string { - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() - return c.defaultBridge.bridgeName + return c.defaultBridge.GetBridgeName() } func (c *openflowManager) getDefaultBridgeMAC() net.HardwareAddr { - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() - return 
c.defaultBridge.macAddress + return c.defaultBridge.GetMAC() } func (c *openflowManager) setDefaultBridgeMAC(macAddr net.HardwareAddr) { - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() - c.defaultBridge.macAddress = macAddr + c.defaultBridge.SetMAC(macAddr) } func (c *openflowManager) updateFlowCacheEntry(key string, flows []string) { @@ -116,10 +112,6 @@ func (c *openflowManager) requestFlowSync() { } func (c *openflowManager) syncFlows() { - // protect gwBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() - c.flowMutex.Lock() defer c.flowMutex.Unlock() @@ -128,15 +120,12 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) } - _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.bridgeName, flows) + _, stderr, err := util.ReplaceOFFlows(c.defaultBridge.GetBridgeName(), flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.flowCache) } if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Lock() - defer c.externalGatewayBridge.Unlock() - c.exGWFlowMutex.Lock() defer c.exGWFlowMutex.Unlock() @@ -145,7 +134,7 @@ func (c *openflowManager) syncFlows() { flows = append(flows, entry...) 
} - _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.bridgeName, flows) + _, stderr, err := util.ReplaceOFFlows(c.externalGatewayBridge.GetBridgeName(), flows) if err != nil { klog.Errorf("Failed to add flows, error: %v, stderr, %s, flows: %s", err, stderr, c.exGWFlowCache) } @@ -160,7 +149,7 @@ func (c *openflowManager) syncFlows() { // // -- to handle host -> service access, via masquerading from the host to OVN GR // -- to handle external -> service(ExternalTrafficPolicy: Local) -> host access without SNAT -func newGatewayOpenFlowManager(gwBridge, exGWBridge *bridgeConfiguration) (*openflowManager, error) { +func newGatewayOpenFlowManager(gwBridge, exGWBridge *bridgeconfig.BridgeConfiguration) (*openflowManager, error) { // add health check function to check default OpenFlow flows are on the shared gateway bridge ofm := &openflowManager{ defaultBridge: gwBridge, @@ -211,16 +200,10 @@ func (c *openflowManager) Run(stopChan <-chan struct{}, doneWg *sync.WaitGroup) } func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []string) { - // protect defaultBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() - - dftFlows := pmtudDropFlows(c.defaultBridge, ipAddrs) + dftFlows := c.defaultBridge.PMTUDDropFlows(ipAddrs) c.updateFlowCacheEntry(key, dftFlows) if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Lock() - defer c.externalGatewayBridge.Unlock() - exGWBridgeDftFlows := pmtudDropFlows(c.externalGatewayBridge, ipAddrs) + exGWBridgeDftFlows := c.externalGatewayBridge.PMTUDDropFlows(ipAddrs) c.updateExBridgeFlowCacheEntry(key, exGWBridgeDftFlows) } } @@ -228,59 +211,49 @@ func (c *openflowManager) updateBridgePMTUDFlowCache(key string, ipAddrs []strin // updateBridgeFlowCache generates the "static" per-bridge flows // note: this is shared between shared and local gateway modes func (c *openflowManager) updateBridgeFlowCache(hostIPs []net.IP, hostSubnets []*net.IPNet) 
error { - // protect defaultBridge config from being updated by gw.nodeIPManager - c.defaultBridge.Lock() - defer c.defaultBridge.Unlock() - // CAUTION: when adding new flows where the in_port is ofPortPatch and the out_port is ofPortPhys, ensure // that dl_src is included in match criteria! - dftFlows, err := flowsForDefaultBridge(c.defaultBridge, hostIPs) - if err != nil { - return err - } - dftCommonFlows, err := commonFlows(hostSubnets, c.defaultBridge) + dftFlows, err := c.defaultBridge.DefaultBridgeFlows(hostSubnets, hostIPs) if err != nil { return err } - dftFlows = append(dftFlows, dftCommonFlows...) c.updateFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) c.updateFlowCacheEntry("DEFAULT", dftFlows) // we consume ex gw bridge flows only if that is enabled if c.externalGatewayBridge != nil { - c.externalGatewayBridge.Lock() - defer c.externalGatewayBridge.Unlock() - c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) - exGWBridgeDftFlows, err := commonFlows(hostSubnets, c.externalGatewayBridge) + exGWBridgeDftFlows, err := c.externalGatewayBridge.ExternalBridgeFlows(hostSubnets) if err != nil { return err } + + c.updateExBridgeFlowCacheEntry("NORMAL", []string{fmt.Sprintf("table=0,priority=0,actions=%s\n", util.NormalAction)}) c.updateExBridgeFlowCacheEntry("DEFAULT", exGWBridgeDftFlows) } return nil } -func checkPorts(netConfigs []*bridgeUDNConfiguration, physIntf, ofPortPhys string) error { +func checkPorts(netConfigs []*bridgeconfig.BridgeUDNConfiguration, physIntf, ofPortPhys string) error { // it could be that the ovn-controller recreated the patch between the host OVS bridge and // the integration bridge, as a result the ofport number changed for that patch interface for _, netConfig := range netConfigs { - if netConfig.ofPortPatch == "" { + if netConfig.OfPortPatch == "" { continue } - curOfportPatch, stderr, err := 
util.GetOVSOfPort("--if-exists", "get", "Interface", netConfig.patchPort, "ofport") + curOfportPatch, stderr, err := util.GetOVSOfPort("--if-exists", "get", "Interface", netConfig.PatchPort, "ofport") if err != nil { - return fmt.Errorf("failed to get ofport of %s, stderr: %q: %w", netConfig.patchPort, stderr, err) + return fmt.Errorf("failed to get ofport of %s, stderr: %q: %w", netConfig.PatchPort, stderr, err) } - if netConfig.ofPortPatch != curOfportPatch { - if netConfig.isDefaultNetwork() || curOfportPatch != "" { + if netConfig.OfPortPatch != curOfportPatch { + if netConfig.IsDefaultNetwork() { klog.Errorf("Fatal error: patch port %s ofport changed from %s to %s", - netConfig.patchPort, netConfig.ofPortPatch, curOfportPatch) + netConfig.PatchPort, netConfig.OfPortPatch, curOfportPatch) os.Exit(1) } else { - klog.Warningf("Patch port %s removed for existing network", netConfig.patchPort) + klog.Warningf("UDN patch port %s changed for existing network from %v to %v. Expecting bridge config update.", netConfig.PatchPort, netConfig.OfPortPatch, curOfportPatch) } } } @@ -362,10 +335,10 @@ func bootstrapOVSFlows(nodeName string) error { // for non-IP packets that would normally be forwarded with NORMAL action (table 0, priority 0 flow). 
dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=10, table=0, in_port=%s, dl_src=%s, actions=output:NORMAL", - defaultOpenFlowCookie, ofportPatch, bridgeMACAddress)) + nodetypes.DefaultOpenFlowCookie, ofportPatch, bridgeMACAddress)) dftFlows = append(dftFlows, fmt.Sprintf("cookie=%s, priority=9, table=0, in_port=%s, actions=drop", - defaultOpenFlowCookie, ofportPatch)) + nodetypes.DefaultOpenFlowCookie, ofportPatch)) dftFlows = append(dftFlows, "priority=0, table=0, actions=output:NORMAL") _, stderr, err = util.ReplaceOFFlows(bridge, dftFlows) diff --git a/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go b/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go index 634878d55c..3d9606079f 100644 --- a/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go +++ b/go-controller/pkg/node/ovspinning/ovspinning_linux_test.go @@ -246,10 +246,12 @@ func assertPIDHasSchedAffinity(t *testing.T, pid int, expectedCPUSet unix.CPUSet require.NoError(t, err) for _, task := range tasks { - err := unix.SchedGetaffinity(task, &actual) - require.NoError(t, err) - assert.Equal(t, expectedCPUSet, actual, - "task[%d] of process[%d] Expected CPUSet %0x != Actual CPUSet %0x", task, pid, expectedCPUSet, actual) + assert.Eventually(t, func() bool { + err := unix.SchedGetaffinity(task, &actual) + assert.NoError(t, err) + + return actual == expectedCPUSet + }, time.Second, 10*time.Millisecond, "task[%d] of process[%d] Expected CPUSet %0x != Actual CPUSet %0x", task, pid, expectedCPUSet, actual) } } diff --git a/go-controller/pkg/node/types/const.go b/go-controller/pkg/node/types/const.go new file mode 100644 index 0000000000..bdf9c388bf --- /dev/null +++ b/go-controller/pkg/node/types/const.go @@ -0,0 +1,22 @@ +package types + +const ( + // CtMarkOVN is the conntrack mark value for OVN traffic + CtMarkOVN = "0x1" + // OvsLocalPort is the name of the OVS bridge local port + OvsLocalPort = "LOCAL" + // DefaultOpenFlowCookie identifies default open flow rules added 
to the host OVS bridge. + // The hex number 0xdeff105, aka defflos, is meant to sound like default flows. + DefaultOpenFlowCookie = "0xdeff105" + // OutputPortDrop is used to signify that there is no output port for an openflow action and the + // rendered action should result in a drop + OutputPortDrop = "output-port-drop" + // OvnKubeNodeSNATMark is used to mark packets that need to be SNAT-ed to nodeIP for + // traffic originating from egressIP and egressService controlled pods towards other nodes in the cluster. + OvnKubeNodeSNATMark = "0x3f0" + // PmtudOpenFlowCookie identifies the flows used to drop ICMP type (3) destination unreachable, + // fragmentation-needed (4) + PmtudOpenFlowCookie = "0x0304" + // CtMarkHost is the conntrack mark value for host traffic + CtMarkHost = "0x2" +) diff --git a/go-controller/pkg/node/util/util.go b/go-controller/pkg/node/util/util.go new file mode 100644 index 0000000000..e04be61b39 --- /dev/null +++ b/go-controller/pkg/node/util/util.go @@ -0,0 +1,118 @@ +package util + +import ( + "fmt" + "net" + + net2 "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + nodetypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node/types" + pkgutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" +) + +// GetNetworkInterfaceIPAddresses returns the IP addresses for the network interface 'iface'. 
+func GetNetworkInterfaceIPAddresses(iface string) ([]*net.IPNet, error) { + allIPs, err := pkgutil.GetFilteredInterfaceV4V6IPs(iface) + if err != nil { + return nil, fmt.Errorf("could not find IP addresses: %v", err) + } + + var ips []*net.IPNet + var foundIPv4 bool + var foundIPv6 bool + for _, ip := range allIPs { + if net2.IsIPv6CIDR(ip) { + if config.IPv6Mode && !foundIPv6 { + // For IPv6 addresses with 128 prefix, let's try to find an appropriate subnet + // in the routing table + subnetIP, err := pkgutil.GetIPv6OnSubnet(iface, ip) + if err != nil { + return nil, fmt.Errorf("could not find IPv6 address on subnet: %v", err) + } + ips = append(ips, subnetIP) + foundIPv6 = true + } + } else if config.IPv4Mode && !foundIPv4 { + ips = append(ips, ip) + foundIPv4 = true + } + } + if config.IPv4Mode && !foundIPv4 { + return nil, fmt.Errorf("failed to find IPv4 address on interface %s", iface) + } else if config.IPv6Mode && !foundIPv6 { + return nil, fmt.Errorf("failed to find IPv6 address on interface %s", iface) + } + return ips, nil +} + +// GetDPUHostPrimaryIPAddresses returns the DPU host IP/Network based on K8s Node IP +// and DPU IP subnet overriden by config config.Gateway.RouterSubnet +func GetDPUHostPrimaryIPAddresses(k8sNodeIP net.IP, ifAddrs []*net.IPNet) ([]*net.IPNet, error) { + // Note(adrianc): No Dual-Stack support at this point as we rely on k8s node IP to derive gateway information + // for each node. + var gwIps []*net.IPNet + isIPv4 := net2.IsIPv4(k8sNodeIP) + + // override subnet mask via config + if config.Gateway.RouterSubnet != "" { + _, addr, err := net.ParseCIDR(config.Gateway.RouterSubnet) + if err != nil { + return nil, err + } + if net2.IsIPv4CIDR(addr) != isIPv4 { + return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). "+ + "does not match Node IP address format", config.Gateway.RouterSubnet) + } + if !addr.Contains(k8sNodeIP) { + return nil, fmt.Errorf("unexpected gateway router subnet provided (%s). 
"+ + "subnet does not contain Node IP address (%s)", config.Gateway.RouterSubnet, k8sNodeIP) + } + addr.IP = k8sNodeIP + gwIps = append(gwIps, addr) + } else { + // Assume Host and DPU share the same subnet + // in this case just update the matching IPNet with the Host's IP address + for _, addr := range ifAddrs { + if net2.IsIPv4CIDR(addr) != isIPv4 { + continue + } + // expect k8s Node IP to be contained in the given subnet + if !addr.Contains(k8sNodeIP) { + continue + } + newAddr := *addr + newAddr.IP = k8sNodeIP + gwIps = append(gwIps, &newAddr) + } + if len(gwIps) == 0 { + return nil, fmt.Errorf("could not find subnet on DPU matching node IP %s", k8sNodeIP) + } + } + return gwIps, nil +} + +func GenerateICMPFragmentationFlow(ipAddr, outputPort, inPort, cookie string, priority int) string { + // we send any ICMP destination unreachable, fragmentation needed to the OVN pipeline too so that + // path MTU discovery continues to work. + icmpMatch := "icmp" + icmpType := 3 + icmpCode := 4 + nwDst := "nw_dst" + if net2.IsIPv6String(ipAddr) { + icmpMatch = "icmp6" + icmpType = 2 + icmpCode = 0 + nwDst = "ipv6_dst" + } + + action := fmt.Sprintf("output:%s", outputPort) + if outputPort == nodetypes.OutputPortDrop { + action = "drop" + } + + icmpFragmentationFlow := fmt.Sprintf("cookie=%s, priority=%d, in_port=%s, %s, %s=%s, icmp_type=%d, "+ + "icmp_code=%d, actions=%s", + cookie, priority, inPort, icmpMatch, nwDst, ipAddr, icmpType, icmpCode, action) + return icmpFragmentationFlow +} diff --git a/go-controller/pkg/node/util/util_suite_test.go b/go-controller/pkg/node/util/util_suite_test.go new file mode 100644 index 0000000000..dc2d625792 --- /dev/null +++ b/go-controller/pkg/node/util/util_suite_test.go @@ -0,0 +1,13 @@ +package util + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestNodeSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Node util Suite") +} diff --git a/go-controller/pkg/node/util/util_test.go b/go-controller/pkg/node/util/util_test.go new file mode 100644 index 0000000000..5ca6cc80a3 --- /dev/null +++ b/go-controller/pkg/node/util/util_test.go @@ -0,0 +1,57 @@ +package util + +import ( + "net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("node util tests", func() { + BeforeEach(func() { + Expect(config.PrepareTestConfig()).To(Succeed()) + }) + + Context("GetDPUHostPrimaryIPAddresses", func() { + + It("returns Gateway IP/Subnet for kubernetes node IP", func() { + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.0.11") + expectedGwSubnet := []*net.IPNet{ + {IP: nodeIP, Mask: net.CIDRMask(24, 32)}, + } + gwSubnet, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).ToNot(HaveOccurred()) + Expect(gwSubnet).To(Equal(expectedGwSubnet)) + }) + + It("Fails if node IP is not in host subnets", func() { + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.1.11") + _, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).To(HaveOccurred()) + }) + + It("returns node IP with config.Gateway.RouterSubnet subnet", func() { + config.Gateway.RouterSubnet = "10.1.0.0/16" + _, dpuSubnet, _ := net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.1.0.11") + expectedGwSubnet := []*net.IPNet{ + {IP: nodeIP, Mask: net.CIDRMask(16, 32)}, + } + gwSubnet, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).ToNot(HaveOccurred()) + Expect(gwSubnet).To(Equal(expectedGwSubnet)) + }) + + It("Fails if node IP is not in config.Gateway.RouterSubnet subnet", func() { + config.Gateway.RouterSubnet = "10.1.0.0/16" + _, dpuSubnet, _ := 
net.ParseCIDR("10.0.0.101/24") + nodeIP := net.ParseIP("10.0.0.11") + _, err := GetDPUHostPrimaryIPAddresses(nodeIP, []*net.IPNet{dpuSubnet}) + Expect(err).To(HaveOccurred()) + }) + }) +}) diff --git a/go-controller/pkg/observability/observability.go b/go-controller/pkg/observability/observability.go index a3ffbb54f3..9348966f13 100644 --- a/go-controller/pkg/observability/observability.go +++ b/go-controller/pkg/observability/observability.go @@ -10,8 +10,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/observability/observability_test.go b/go-controller/pkg/observability/observability_test.go index cfda506362..a247150093 100644 --- a/go-controller/pkg/observability/observability_test.go +++ b/go-controller/pkg/observability/observability_test.go @@ -4,7 +4,7 @@ import ( "strings" "time" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/address_set/address_set.go b/go-controller/pkg/ovn/address_set/address_set.go index a0b709eafc..ea5e035e22 100644 --- a/go-controller/pkg/ovn/address_set/address_set.go +++ b/go-controller/pkg/ovn/address_set/address_set.go @@ -7,8 +7,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/address_set/address_set_test.go b/go-controller/pkg/ovn/address_set/address_set_test.go index 40dec33d24..4c1c0af814 100644 --- a/go-controller/pkg/ovn/address_set/address_set_test.go +++ b/go-controller/pkg/ovn/address_set/address_set_test.go @@ -5,8 +5,8 @@ import ( "github.com/onsi/gomega" "github.com/urfave/cli/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/address_set/fake_address_set.go b/go-controller/pkg/ovn/address_set/fake_address_set.go index 48f56bb616..2f783b3486 100644 --- a/go-controller/pkg/ovn/address_set/fake_address_set.go +++ b/go-controller/pkg/ovn/address_set/fake_address_set.go @@ -11,7 +11,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/address_set/mocks/AddressSet.go b/go-controller/pkg/ovn/address_set/mocks/AddressSet.go index f8b9761b26..f5dd89448f 100644 --- a/go-controller/pkg/ovn/address_set/mocks/AddressSet.go +++ b/go-controller/pkg/ovn/address_set/mocks/AddressSet.go @@ -3,7 +3,7 @@ package mocks import ( - ovsdb "github.com/ovn-org/libovsdb/ovsdb" + ovsdb "github.com/ovn-kubernetes/libovsdb/ovsdb" mock "github.com/stretchr/testify/mock" ) diff --git a/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go b/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go index 
f76d6f132a..0d18215185 100644 --- a/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go +++ b/go-controller/pkg/ovn/address_set/mocks/AddressSetFactory.go @@ -8,7 +8,7 @@ import ( ops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - ovsdb "github.com/ovn-org/libovsdb/ovsdb" + ovsdb "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // AddressSetFactory is an autogenerated mock type for the AddressSetFactory type diff --git a/go-controller/pkg/ovn/admin_network_policy_test.go b/go-controller/pkg/ovn/admin_network_policy_test.go index 82eac3cf9d..152ee0c0a8 100644 --- a/go-controller/pkg/ovn/admin_network_policy_test.go +++ b/go-controller/pkg/ovn/admin_network_policy_test.go @@ -94,7 +94,7 @@ func getANPGressACL(action, anpName, direction string, rulePriority int32, ruleIndex int32, ports *[]anpapi.AdminNetworkPolicyPort, namedPorts map[string][]libovsdbutil.NamedNetworkPolicyPort, banp bool) []*nbdb.ACL { retACLs := []*nbdb.ACL{} - // we are not using BuildACL and instead manually building it on purpose so that the code path for BuildACL is also tested + // we are not using BuildACLWithDefaultTier and instead manually building it on purpose so that the code path for BuildACLWithDefaultTier is also tested acl := nbdb.ACL{} acl.Action = action acl.Severity = nil diff --git a/go-controller/pkg/ovn/base_event_handler.go b/go-controller/pkg/ovn/base_event_handler.go index b08bc30b99..f54afc9925 100644 --- a/go-controller/pkg/ovn/base_event_handler.go +++ b/go-controller/pkg/ovn/base_event_handler.go @@ -14,7 +14,7 @@ import ( egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) @@ -213,11 +213,11 @@ func (h *baseNetworkControllerEventHandler) 
recordAddEvent(objType reflect.Type, case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording add event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording add event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } @@ -227,11 +227,11 @@ func (h *baseNetworkControllerEventHandler) recordUpdateEvent(objType reflect.Ty case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording update event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording update event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } @@ -241,11 +241,11 @@ func (h *baseNetworkControllerEventHandler) recordDeleteEvent(objType reflect.Ty case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording delete event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) 
klog.V(5).Infof("Recording delete event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } @@ -255,10 +255,10 @@ func (h *baseNetworkControllerEventHandler) recordSuccessEvent(objType reflect.T case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording success event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording success event on multinetwork policy %s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().End("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().End("multinetworkpolicy", mnp.Namespace, mnp.Name) } } diff --git a/go-controller/pkg/ovn/base_network_controller.go b/go-controller/pkg/ovn/base_network_controller.go index 1a3f8685e4..aba4b9ab04 100644 --- a/go-controller/pkg/ovn/base_network_controller.go +++ b/go-controller/pkg/ovn/base_network_controller.go @@ -14,8 +14,6 @@ import ( corev1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -24,8 +22,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -35,6 +33,7 @@ import ( libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" @@ -192,8 +191,7 @@ type BaseNetworkController struct { func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed func(string)) error { // gather some information first - var err error - var retryNodes []*corev1.Node + var reconcileNodes []string oc.localZoneNodes.Range(func(key, _ any) bool { nodeName := key.(string) wasAdvertised := util.IsPodNetworkAdvertisedAtNode(oc, nodeName) @@ -202,51 +200,87 @@ func (oc *BaseNetworkController) reconcile(netInfo util.NetInfo, setNodeFailed f // noop return true } - var node *corev1.Node - node, err = oc.watchFactory.GetNode(nodeName) - if err != nil { - return false - } - retryNodes = append(retryNodes, node) + reconcileNodes = append(reconcileNodes, nodeName) return true }) - if err != nil { - return fmt.Errorf("failed to reconcile network %s: %w", oc.GetNetworkName(), err) - } reconcileRoutes := oc.routeImportManager != nil && oc.routeImportManager.NeedsReconciliation(netInfo) reconcilePendingPods := !oc.IsDefault() && !oc.ReconcilableNetInfo.EqualNADs(netInfo.GetNADs()...) + reconcileNamespaces := sets.NewString() + if oc.IsPrimaryNetwork() { + // since CanServeNamespace filters out namespace events for namespaces unknown + // to be served by this primary network, we need to reconcile namespaces once + // the network is reconfigured to serve a namespace. 
+ reconcileNamespaces = sets.NewString(netInfo.GetNADNamespaces()...).Difference( + sets.NewString(oc.GetNADNamespaces()...)) + } // set the new NetInfo, point of no return - err = util.ReconcileNetInfo(oc.ReconcilableNetInfo, netInfo) + err := util.ReconcileNetInfo(oc.ReconcilableNetInfo, netInfo) if err != nil { return fmt.Errorf("failed to reconcile network information for network %s: %v", oc.GetNetworkName(), err) } + oc.doReconcile(reconcileRoutes, reconcilePendingPods, reconcileNodes, setNodeFailed, reconcileNamespaces.List()) + + return nil +} + +// doReconcile performs the reconciliation after the controller NetInfo has already being +// updated with the changes. What needs to be reconciled should already be known and +// provided on the arguments of the method. This method returns no error and logs them +// instead since once the controller NetInfo has been updated there is no point in retrying. +func (oc *BaseNetworkController) doReconcile(reconcileRoutes, reconcilePendingPods bool, + reconcileNodes []string, setNodeFailed func(string), reconcileNamespaces []string) { if reconcileRoutes { - err = oc.routeImportManager.ReconcileNetwork(oc.GetNetworkName()) + err := oc.routeImportManager.ReconcileNetwork(oc.GetNetworkName()) if err != nil { klog.Errorf("Failed to reconcile network %s on route import controller: %v", oc.GetNetworkName(), err) } } - for _, node := range retryNodes { - setNodeFailed(node.Name) + for _, nodeName := range reconcileNodes { + setNodeFailed(nodeName) + node, err := oc.watchFactory.GetNode(nodeName) + if err != nil { + klog.Infof("Failed to get node %s for reconciling network %s: %v", nodeName, oc.GetNetworkName(), err) + continue + } err = oc.retryNodes.AddRetryObjWithAddNoBackoff(node) if err != nil { - klog.Errorf("Failed to retry node %s for network %s: %v", node.Name, oc.GetNetworkName(), err) + klog.Errorf("Failed to retry node %s for network %s: %v", nodeName, oc.GetNetworkName(), err) } } - if len(retryNodes) > 0 { + if 
len(reconcileNodes) > 0 { oc.retryNodes.RequestRetryObjs() } if reconcilePendingPods { - if err := ovnretry.RequeuePendingPods(oc.kube, oc.GetNetInfo(), oc.retryPods); err != nil { + if err := ovnretry.RequeuePendingPods(oc.watchFactory, oc.GetNetInfo(), oc.retryPods); err != nil { klog.Errorf("Failed to requeue pending pods for network %s: %v", oc.GetNetworkName(), err) } } - return nil + // reconciles namespaces that were added to the network, this will trigger namespace add event and + // network controller creates the address set for the namespace. + // To update gress policy ACLs with peer namespace address set, invoke requeuePeerNamespace method after + // address set is created for the namespace. + namespaceAdded := false + for _, ns := range reconcileNamespaces { + namespace, err := oc.watchFactory.GetNamespace(ns) + if err != nil { + klog.Infof("Failed to get namespace %s for reconciling network %s: %v", ns, oc.GetNetworkName(), err) + continue + } + err = oc.retryNamespaces.AddRetryObjWithAddNoBackoff(namespace) + if err != nil { + klog.Infof("Failed to retry namespace %s for network %s: %v", ns, oc.GetNetworkName(), err) + continue + } + namespaceAdded = true + } + if namespaceAdded { + oc.retryNamespaces.RequestRetryObjs() + } } // BaseSecondaryNetworkController structure holds per-network fields and network specific @@ -319,7 +353,7 @@ func (bnc *BaseNetworkController) GetLogicalPortName(pod *corev1.Pod, nadName st func (bnc *BaseNetworkController) AddConfigDurationRecord(kind, namespace, name string) ( []ovsdb.Operation, func(), time.Time, error) { if !bnc.IsSecondary() { - return metrics.GetConfigDurationRecorder().AddOVN(bnc.nbClient, kind, namespace, name) + return recorders.GetConfigDurationRecorder().AddOVN(bnc.nbClient, kind, namespace, name) } // TBD: no op for secondary network for now return []ovsdb.Operation{}, func() {}, time.Time{}, nil @@ -391,7 +425,7 @@ func (bnc *BaseNetworkController) syncNodeClusterRouterPort(node *corev1.Node, h 
enableGatewayMTU := util.ParseNodeGatewayMTUSupport(node) if enableGatewayMTU { lrpOptions = map[string]string{ - "gateway_mtu": strconv.Itoa(config.Default.MTU), + libovsdbops.GatewayMTU: strconv.Itoa(config.Default.MTU), } } logicalRouterPort := nbdb.LogicalRouterPort{ @@ -526,7 +560,7 @@ func (bnc *BaseNetworkController) createNodeLogicalSwitch(nodeName string, hostS Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": types.RouterToSwitchPrefix + switchName, + libovsdbops.RouterPort: types.RouterToSwitchPrefix + switchName, }, } if bnc.IsDefault() { @@ -579,12 +613,10 @@ func (bnc *BaseNetworkController) deleteNodeLogicalNetwork(nodeName string) erro func (bnc *BaseNetworkController) addAllPodsOnNode(nodeName string) []error { errs := []error{} - pods, err := bnc.kube.GetPods(metav1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), - }) + pods, err := bnc.watchFactory.GetAllPods() if err != nil { errs = append(errs, err) - klog.Errorf("Unable to list existing pods on node: %s, existing pods on this node may not function", + klog.Errorf("Unable to list existing pods for synchronizing node: %s, existing pods on this node may not function", nodeName) } else { klog.V(5).Infof("When adding node %s for network %s, found %d pods to add to retryPods", nodeName, bnc.GetNetworkName(), len(pods)) @@ -593,6 +625,9 @@ func (bnc *BaseNetworkController) addAllPodsOnNode(nodeName string) []error { if util.PodCompleted(&pod) { continue } + if pod.Spec.NodeName != nodeName { + continue + } klog.V(5).Infof("Adding pod %s/%s to retryPods for network %s", pod.Namespace, pod.Name, bnc.GetNetworkName()) err = bnc.retryPods.AddRetryObjWithAddNoBackoff(&pod) if err != nil { diff --git a/go-controller/pkg/ovn/base_network_controller_multicast.go b/go-controller/pkg/ovn/base_network_controller_multicast.go index eadb47882a..6f413177d5 100644 --- 
a/go-controller/pkg/ovn/base_network_controller_multicast.go +++ b/go-controller/pkg/ovn/base_network_controller_multicast.go @@ -119,13 +119,13 @@ func (bnc *BaseNetworkController) createMulticastAllowPolicy(ns string, nsInfo * egressMatch := libovsdbutil.GetACLMatch(portGroupName, bnc.getMulticastACLEgrMatch(), aclDir) dbIDs := getNamespaceMcastACLDbIDs(ns, aclDir, bnc.controllerName) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - egressACL := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, egressMatch, nbdb.ACLActionAllow, nil, aclPipeline) + egressACL := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastAllowPriority, egressMatch, nbdb.ACLActionAllow, nil, aclPipeline) aclDir = libovsdbutil.ACLIngress ingressMatch := libovsdbutil.GetACLMatch(portGroupName, bnc.getMulticastACLIgrMatch(nsInfo), aclDir) dbIDs = getNamespaceMcastACLDbIDs(ns, aclDir, bnc.controllerName) aclPipeline = libovsdbutil.ACLDirectionToACLPipeline(aclDir) - ingressACL := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, ingressMatch, nbdb.ACLActionAllow, nil, aclPipeline) + ingressACL := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastAllowPriority, ingressMatch, nbdb.ACLActionAllow, nil, aclPipeline) acls := []*nbdb.ACL{egressACL, ingressACL} ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) 
@@ -186,7 +186,7 @@ func (bnc *BaseNetworkController) createDefaultDenyMulticastPolicy() error { for _, aclDir := range []libovsdbutil.ACLDirection{libovsdbutil.ACLEgress, libovsdbutil.ACLIngress} { dbIDs := getDefaultMcastACLDbIDs(mcastDefaultDenyID, aclDir, bnc.controllerName) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - acl := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastDenyPriority, match, nbdb.ACLActionDrop, nil, aclPipeline) + acl := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastDenyPriority, match, nbdb.ACLActionDrop, nil, aclPipeline) acls = append(acls, acl) } ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), acls...) @@ -228,7 +228,7 @@ func (bnc *BaseNetworkController) createDefaultAllowMulticastPolicy() error { match := libovsdbutil.GetACLMatch(rtrPGName, mcastMatch, aclDir) dbIDs := getDefaultMcastACLDbIDs(mcastAllowInterNodeID, aclDir, bnc.controllerName) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - acl := libovsdbutil.BuildACL(dbIDs, types.DefaultMcastAllowPriority, match, nbdb.ACLActionAllow, nil, aclPipeline) + acl := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultMcastAllowPriority, match, nbdb.ACLActionAllow, nil, aclPipeline) acls = append(acls, acl) } diff --git a/go-controller/pkg/ovn/base_network_controller_pods.go b/go-controller/pkg/ovn/base_network_controller_pods.go index c6a105aa2a..4d334cf6a3 100644 --- a/go-controller/pkg/ovn/base_network_controller_pods.go +++ b/go-controller/pkg/ovn/base_network_controller_pods.go @@ -18,8 +18,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ipallocator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip" subnetipallocator 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/ip/subnet" @@ -535,7 +535,7 @@ func (bnc *BaseNetworkController) addLogicalPortToNetwork(pod *corev1.Pod, nadNa // rescheduled. if !config.Kubernetes.DisableRequestedChassis { - lsp.Options["requested-chassis"] = pod.Spec.NodeName + lsp.Options[libovsdbops.RequestedChassis] = pod.Spec.NodeName } // let's calculate if this network controller's role for this pod diff --git a/go-controller/pkg/ovn/base_network_controller_policy.go b/go-controller/pkg/ovn/base_network_controller_policy.go index f4c10bfacf..3be9b444f8 100644 --- a/go-controller/pkg/ovn/base_network_controller_policy.go +++ b/go-controller/pkg/ovn/base_network_controller_policy.go @@ -14,8 +14,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -23,6 +23,7 @@ import ( libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/retry" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" @@ -160,8 +161,8 @@ type networkPolicy struct { // network policy owns only 1 local pod handler localPodHandler *factory.Handler - // peer namespace handlers - nsHandlerList []*factory.Handler + // peer namespace reconcilers + reconcilePeerNamespaces []*peerNamespacesRetry // peerAddressSets stores PodSelectorAddressSet keys for peers that this network policy was successfully added to. 
// Required for cleanup. peerAddressSets []string @@ -186,17 +187,22 @@ type networkPolicy struct { cancelableContext *util.CancelableContext } +type peerNamespacesRetry struct { + retryFramework *retry.RetryFramework + handler *factory.Handler +} + func NewNetworkPolicy(policy *knet.NetworkPolicy) *networkPolicy { policyTypeIngress, policyTypeEgress := getPolicyType(policy) np := &networkPolicy{ - name: policy.Name, - namespace: policy.Namespace, - ingressPolicies: make([]*gressPolicy, 0), - egressPolicies: make([]*gressPolicy, 0), - isIngress: policyTypeIngress, - isEgress: policyTypeEgress, - nsHandlerList: make([]*factory.Handler, 0), - localPods: sync.Map{}, + name: policy.Name, + namespace: policy.Namespace, + ingressPolicies: make([]*gressPolicy, 0), + egressPolicies: make([]*gressPolicy, 0), + isIngress: policyTypeIngress, + isEgress: policyTypeEgress, + reconcilePeerNamespaces: make([]*peerNamespacesRetry, 0), + localPods: sync.Map{}, } return np } @@ -246,11 +252,11 @@ func (bnc *BaseNetworkController) addHairpinAllowACL() error { } ingressACLIDs := bnc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeIngress)) - ingressACL := libovsdbutil.BuildACL(ingressACLIDs, types.DefaultAllowPriority, match, + ingressACL := libovsdbutil.BuildACLWithDefaultTier(ingressACLIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) egressACLIDs := bnc.getNetpolDefaultACLDbIDs(string(knet.PolicyTypeEgress)) - egressACL := libovsdbutil.BuildACL(egressACLIDs, types.DefaultAllowPriority, match, + egressACL := libovsdbutil.BuildACLWithDefaultTier(egressACLIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportEgressAfterLB) ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, nil, ingressACL, egressACL) @@ -329,7 +335,7 @@ func (bnc *BaseNetworkController) addAllowACLFromNode(switchName string, mgmtPor } match := fmt.Sprintf("%s.src==%s", ipFamily, mgmtPortIP.String()) dbIDs := 
getAllowFromNodeACLDbIDs(switchName, mgmtPortIP.String(), bnc.controllerName) - nodeACL := libovsdbutil.BuildACL(dbIDs, types.DefaultAllowPriority, match, + nodeACL := libovsdbutil.BuildACLWithDefaultTier(dbIDs, types.DefaultAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) ops, err := libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, nil, bnc.GetSamplingConfig(), nodeACL) @@ -382,9 +388,9 @@ func (bnc *BaseNetworkController) buildDenyACLs(namespace, pgName string, aclLog allowMatch := libovsdbutil.GetACLMatch(pgName, arpAllowPolicyMatch, aclDir) aclPipeline := libovsdbutil.ACLDirectionToACLPipeline(aclDir) - denyACL = libovsdbutil.BuildACL(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, defaultDenyACL), + denyACL = libovsdbutil.BuildACLWithDefaultTier(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, defaultDenyACL), types.DefaultDenyPriority, denyMatch, nbdb.ACLActionDrop, aclLogging, aclPipeline) - allowACL = libovsdbutil.BuildACL(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, arpAllowACL), + allowACL = libovsdbutil.BuildACLWithDefaultTier(bnc.getDefaultDenyPolicyACLIDs(namespace, aclDir, arpAllowACL), types.DefaultAllowPriority, allowMatch, nbdb.ACLActionAllow, nil, aclPipeline) return } @@ -1490,6 +1496,47 @@ func (bnc *BaseNetworkController) peerNamespaceUpdate(np *networkPolicy, gp *gre return err } +// requeuePeerNamespace enqueues the namespace into network policy peer namespace +// retry framework object(s) which need to be retried immediately with add event. 
+func (bnc *BaseNetworkController) requeuePeerNamespace(namespace *corev1.Namespace) error { + var errors []error + npKeys := bnc.networkPolicies.GetKeys() + for _, npKey := range npKeys { + err := bnc.networkPolicies.DoWithLock(npKey, func(npKey string) error { + np, ok := bnc.networkPolicies.Load(npKey) + if !ok { + return nil + } + np.RLock() + defer np.RUnlock() + if np.deleted { + return nil + } + var errors []error + for _, reconcilePeerNamespace := range np.reconcilePeerNamespaces { + // Filter out namespace when it's labels not matching with network policy peer namespace + // selector. + if !reconcilePeerNamespace.handler.FilterFunc(namespace) { + continue + } + err := reconcilePeerNamespace.retryFramework.AddRetryObjWithAddNoBackoff(namespace) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retry peer namespace %s for network policy %s on network %s: %w", + namespace.Name, npKey, bnc.GetNetworkName(), err)) + continue + } + reconcilePeerNamespace.retryFramework.RequestRetryObjs() + } + return utilerrors.Join(errors...) + }) + if err != nil { + errors = append(errors, fmt.Errorf("failed to retry peer namespaces for network policy %s on network %s: %w", + npKey, bnc.GetNetworkName(), err)) + } + } + return utilerrors.Join(errors...) +} + // addPeerNamespaceHandler starts a watcher for PeerNamespaceSelectorType. // Sync function and Add event for every existing namespace will be executed sequentially first, and an error will be // returned if something fails. @@ -1523,7 +1570,18 @@ func (bnc *BaseNetworkController) addPeerNamespaceHandler( return err } - np.nsHandlerList = append(np.nsHandlerList, namespaceHandler) + // Add peer namespace retry framework object into np.reconcilePeerNamespaces so that when + // a new peer namespace is newly created later under UDN network, it gets reconciled and + // address set is created for the namespace. 
so we must reconcile it for network policy + // as well to update gress policy ACL with matching peer namespace address set. + if bnc.IsPrimaryNetwork() { + np.Lock() + np.reconcilePeerNamespaces = append(np.reconcilePeerNamespaces, + &peerNamespacesRetry{retryFramework: retryPeerNamespaces, + handler: namespaceHandler}) + np.Unlock() + } + return nil } @@ -1537,10 +1595,10 @@ func (bnc *BaseNetworkController) shutdownHandlers(np *networkPolicy) { bnc.watchFactory.RemovePodHandler(np.localPodHandler) np.localPodHandler = nil } - for _, handler := range np.nsHandlerList { - bnc.watchFactory.RemoveNamespaceHandler(handler) + for _, retry := range np.reconcilePeerNamespaces { + bnc.watchFactory.RemoveNamespaceHandler(retry.handler) } - np.nsHandlerList = make([]*factory.Handler, 0) + np.reconcilePeerNamespaces = make([]*peerNamespacesRetry, 0) } // The following 2 functions should return the same key for network policy based on k8s on internal networkPolicy object diff --git a/go-controller/pkg/ovn/base_network_controller_secondary.go b/go-controller/pkg/ovn/base_network_controller_secondary.go index cef46aaa6e..253524ff87 100644 --- a/go-controller/pkg/ovn/base_network_controller_secondary.go +++ b/go-controller/pkg/ovn/base_network_controller_secondary.go @@ -18,8 +18,8 @@ import ( utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -679,7 +679,15 @@ func (bsnc *BaseSecondaryNetworkController) AddNamespaceForSecondaryNetwork(ns * if err != nil { return fmt.Errorf("failed to ensure namespace locked: %v", err) } - defer nsUnlock() + nsUnlock() + // Enqueue the UDN namespace into network policy controller if it needs to be + // processed by network policy 
peer namespace handlers. + if bsnc.IsPrimaryNetwork() { + err = bsnc.requeuePeerNamespace(ns) + if err != nil { + return fmt.Errorf("failed to requeue peer namespace %s: %v", ns.Name, err) + } + } return nil } @@ -804,7 +812,7 @@ func (oc *BaseSecondaryNetworkController) allowPersistentIPs() bool { // buildUDNEgressSNAT is used to build the conditional SNAT required on L3 and L2 UDNs to // steer traffic correctly via mp0 when leaving OVN to the host -func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, outputPort string) ([]*nbdb.NAT, error) { +func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets []*net.IPNet, outputPort string, isUDNAdvertised bool) ([]*nbdb.NAT, error) { if len(localPodSubnets) == 0 { return nil, nil // nothing to do } @@ -820,10 +828,11 @@ func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets [ types.TopologyExternalID: bsnc.TopologyType(), } for _, localPodSubnet := range localPodSubnets { + ipFamily := utilnet.IPv4 + masqIP, err = udn.AllocateV4MasqueradeIPs(networkID) if utilnet.IsIPv6CIDR(localPodSubnet) { masqIP, err = udn.AllocateV6MasqueradeIPs(networkID) - } else { - masqIP, err = udn.AllocateV4MasqueradeIPs(networkID) + ipFamily = utilnet.IPv6 } if err != nil { return nil, err @@ -831,12 +840,43 @@ func (bsnc *BaseSecondaryNetworkController) buildUDNEgressSNAT(localPodSubnets [ if masqIP == nil { return nil, fmt.Errorf("masquerade IP cannot be empty network %s (%d): %v", bsnc.GetNetworkName(), networkID, err) } - snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, - extIDs, getMasqueradeManagementIPSNATMatch(dstMac.String()))) + if !isUDNAdvertised { + snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, + extIDs, getMasqueradeManagementIPSNATMatch(dstMac.String()))) + } else { + // For advertised networks, we need to SNAT any traffic 
leaving the pods from these networks towards the node IPs + // in the cluster. In order to do such a conditional SNAT, we need an address set that contains the node IPs in the cluster. + // Given that egressIP feature already has an address set containing these nodeIPs owned by the default network controller, let's re-use it. + dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName) + addrSet, err := bsnc.addressSetFactory.GetAddressSet(dbIDs) + if err != nil { + return nil, fmt.Errorf("cannot ensure that addressSet %s exists: %w", NodeIPAddrSetName, err) + } + ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS := addrSet.GetASHashNames() + + snats = append(snats, libovsdbops.BuildSNATWithMatch(&masqIP.ManagementPort.IP, localPodSubnet, outputPort, + extIDs, fmt.Sprintf("%s && (%s)", getMasqueradeManagementIPSNATMatch(dstMac.String()), + getClusterNodesDestinationBasedSNATMatch(ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS, ipFamily)))) + } } return snats, nil } +func getMasqueradeManagementIPSNATMatch(dstMac string) string { + return fmt.Sprintf("eth.dst == %s", dstMac) +} + +// getClusterNodesDestinationBasedSNATMatch creates destination-based SNAT match for the specified IP family +func getClusterNodesDestinationBasedSNATMatch(ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS string, ipFamily utilnet.IPFamily) string { + var match string + if ipFamily == utilnet.IPv4 { + match = fmt.Sprintf("ip4.dst == $%s", ipv4ClusterNodeIPAS) + } else { + match = fmt.Sprintf("ip6.dst == $%s", ipv6ClusterNodeIPAS) + } + return match +} + func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnotation *util.PodAnnotation, lsp *nbdb.LogicalSwitchPort) error { opts := []kubevirt.DHCPConfigsOpt{} @@ -859,10 +899,6 @@ func (bsnc *BaseSecondaryNetworkController) ensureDHCP(pod *corev1.Pod, podAnnot return kubevirt.EnsureDHCPOptionsForLSP(bsnc.controllerName, bsnc.nbClient, pod, podAnnotation.IPs, lsp, opts...) 
} -func getMasqueradeManagementIPSNATMatch(dstMac string) string { - return fmt.Sprintf("eth.dst == %s", dstMac) -} - func (bsnc *BaseSecondaryNetworkController) requireDHCP(pod *corev1.Pod) bool { // Configure DHCP only for kubevirt VMs layer2 primary udn with subnets return kubevirt.IsPodOwnedByVirtualMachine(pod) && diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go index 1433544a0b..b1d345bcf4 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy.go @@ -16,8 +16,8 @@ import ( "k8s.io/klog/v2" anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go index 1d230a6a45..080dd22d19 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/admin_network_policy_controller.go @@ -21,7 +21,7 @@ import ( anpinformer "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/apis/v1alpha1" anplister "sigs.k8s.io/network-policy-api/pkg/client/listers/apis/v1alpha1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go b/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go index cd2e636ea8..dcf1fb6aab 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/metrics.go @@ -8,14 +8,14 @@ import ( anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // Descriptors used by the ANPControllerCollector below. var ( anpRuleCountDesc = prometheus.NewDesc( - prometheus.BuildFQName(metrics.MetricOvnkubeNamespace, metrics.MetricOvnkubeSubsystemController, "admin_network_policies_rules"), + prometheus.BuildFQName(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemController, "admin_network_policies_rules"), "The total number of rules across all admin network policies in the cluster", []string{ "direction", // direction is either "ingress" or "egress"; so cardinality is max 2 for this label @@ -23,7 +23,7 @@ var ( }, nil, ) banpRuleCountDesc = prometheus.NewDesc( - prometheus.BuildFQName(metrics.MetricOvnkubeNamespace, metrics.MetricOvnkubeSubsystemController, "baseline_admin_network_policies_rules"), + prometheus.BuildFQName(types.MetricOvnkubeNamespace, types.MetricOvnkubeSubsystemController, "baseline_admin_network_policies_rules"), "The total number of rules across all baseline admin network policies in the cluster", []string{ "direction", // direction is either "ingress" or "egress"; so cardinality is max 2 for this label diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/repair.go b/go-controller/pkg/ovn/controller/admin_network_policy/repair.go index 
55bf85e71f..84c0cf2a50 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/repair.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/repair.go @@ -9,7 +9,7 @@ import ( "k8s.io/klog/v2" anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go b/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go index ac63e873e6..6a28fa60d3 100644 --- a/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go +++ b/go-controller/pkg/ovn/controller/admin_network_policy/status_test.go @@ -13,7 +13,7 @@ import ( anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" anpfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller.go b/go-controller/pkg/ovn/controller/apbroute/external_controller.go index cd034d67b7..73f6208e96 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller.go @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteinformer 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/adminpolicybasedroute/v1" @@ -565,10 +566,14 @@ func (m *externalPolicyManager) onPodUpdate(oldObj, newObj interface{}) { utilruntime.HandleError(errors.New("invalid Pod provided to onPodUpdate()")) return } - // if labels AND assigned Pod IPs AND the multus network status annotations are the same, skip processing changes to the pod. + // if labels AND assigned Pod IPs AND the multus network status annotations AND + // pod PodReady condition AND deletion timestamp (PodTerminating) are + // the same, skip processing changes to the pod. if reflect.DeepEqual(o.Labels, n.Labels) && reflect.DeepEqual(o.Status.PodIPs, n.Status.PodIPs) && - reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) { + reflect.DeepEqual(o.Annotations[nettypes.NetworkStatusAnnot], n.Annotations[nettypes.NetworkStatusAnnot]) && + reflect.DeepEqual(v1pod.GetPodReadyCondition(o.Status), v1pod.GetPodReadyCondition(n.Status)) && + reflect.DeepEqual(o.DeletionTimestamp, n.DeletionTimestamp) { return } m.podQueue.Add(n) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go index 57ab01d93b..6f521bf2bb 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_namespace_test.go @@ -201,14 +201,32 @@ var _ = Describe("OVN External Gateway namespace", func() { "k8s.ovn.org/routing-network": "", nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, annotatedPodIP)}, }, - Status: corev1.PodStatus{PodIPs: []corev1.PodIP{{IP: annotatedPodIP}}, Phase: corev1.PodRunning}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: annotatedPodIP}}, + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: 
corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, } podGW = &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: namespaceGW.Name, Labels: map[string]string{"name": "pod"}, Annotations: map[string]string{nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, dynamicHopHostNetPodIP)}}, - Status: corev1.PodStatus{PodIPs: []corev1.PodIP{{IP: dynamicHopHostNetPodIP}}, Phase: corev1.PodRunning}, + Status: corev1.PodStatus{ + PodIPs: []corev1.PodIP{{IP: dynamicHopHostNetPodIP}}, + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, } namespaceTargetWithPod, namespaceTarget2WithPod, namespaceTarget2WithoutPod, namespaceGWWithPod *namespaceWithPods ) diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go index 9c49c474ba..2b2915f521 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod.go @@ -11,7 +11,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" utilnet "k8s.io/utils/net" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" ) func (m *externalPolicyManager) syncPod(pod *corev1.Pod, routeQueue workqueue.TypedRateLimitingInterface[string]) error { @@ -28,6 +31,13 @@ func (m *externalPolicyManager) syncPod(pod *corev1.Pod, routeQueue workqueue.Ty } func getExGwPodIPs(gatewayPod *corev1.Pod, networkName string) (sets.Set[string], error) { + // If an external gateway pod is in terminating or not ready state then don't return the + // IPs for the external gateway pod + if util.PodTerminating(gatewayPod) || !v1pod.IsPodReadyConditionTrue(gatewayPod.Status) { + klog.Warningf("External gateway pod cannot serve traffic; it's in terminating or not ready state: 
%s/%s", gatewayPod.Namespace, gatewayPod.Name) + return nil, nil + } + if networkName != "" { return getMultusIPsFromNetworkName(gatewayPod, networkName) } diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go index 509940c730..7cbbcd7430 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_pod_test.go @@ -448,6 +448,163 @@ var _ = Describe("OVN External Gateway pod", func() { }) }) + + var _ = Context("When pod goes into terminating or not ready state", func() { + + DescribeTable("reconciles a pod gateway in terminating or not ready state that matches two policies", func( + terminating bool, + ) { + initController([]runtime.Object{namespaceGW, namespaceTarget, namespaceTarget2, targetPod1, targetPod2, pod1}, + []runtime.Object{dynamicPolicy, dynamicPolicyDiffTargetNS}) + + expectedPolicy1, expectedRefs1 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWithPod}, false) + + expectedPolicy2, expectedRefs2 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWithPod}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + + if terminating { + By("Setting deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as not ready") + setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) + } + + expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + 
[]*namespaceWithPods{namespaceGWWithoutPod}, false) + + expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWithoutPod}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + }, + Entry("Gateway pod in terminating state", true), + Entry("Gateway pod in not ready state", false), + ) + + DescribeTable("reconciles a pod gateway in terminating or not ready state that does not match any policy", func( + terminating bool, + ) { + noMatchPolicy := newPolicy( + "noMatchPolicy", + &metav1.LabelSelector{MatchLabels: targetNamespace1Match}, + nil, + &metav1.LabelSelector{MatchLabels: gatewayNamespaceMatch}, + &metav1.LabelSelector{MatchLabels: map[string]string{"key": "nomatch"}}, + false, + ) + initController([]runtime.Object{namespaceGW, namespaceTarget, pod1}, []runtime.Object{noMatchPolicy}) + + expectedPolicy, expectedRefs := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithoutPod}, + nil, + []*namespaceWithPods{namespaceGWWithoutPod}, false) + + eventuallyExpectNumberOfPolicies(1) + eventuallyExpectConfig(noMatchPolicy.Name, expectedPolicy, expectedRefs) + + if terminating { + By("Setting deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as not ready") + setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) + } + // make sure pod event is handled + time.Sleep(100 * time.Millisecond) + + eventuallyExpectNumberOfPolicies(1) + eventuallyExpectConfig(noMatchPolicy.Name, expectedPolicy, expectedRefs) + }, + Entry("Gateway pod in terminating state", true), + Entry("Gateway pod in not ready state", false), + ) + + DescribeTable("reconciles a pod 
gateway in terminating or not ready state that is one of two pods that matches two policies", func( + terminating bool, + ) { + initController([]runtime.Object{namespaceGW, namespaceTarget, namespaceTarget2, targetPod1, targetPod2, pod1, pod2}, + []runtime.Object{dynamicPolicy, dynamicPolicyDiffTargetNS}) + namespaceGWWith2Pods := newNamespaceWithPods(namespaceGW.Name, pod1, pod2) + expectedPolicy1, expectedRefs1 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + expectedPolicy2, expectedRefs2 := expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + + if terminating { + By("Setting deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, &metav1.Time{Time: time.Now().Add(1000 * time.Second)}, fakeClient) + } else { + By("Updating the ex gw pod status to mark it as not ready") + setPodConditionReady(pod1, corev1.ConditionFalse, fakeClient) + } + + namespaceGWWith1Pod := newNamespaceWithPods(namespaceGW.Name, pod2) + + expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWith1Pod}, false) + + expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWith1Pod}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + + if terminating { + By("Removing deletion timestamp for the ex gw pod") + setPodDeletionTimestamp(pod1, nil, fakeClient) 
+ } else { + By("Updating the ex gw pod status to mark it as ready") + setPodConditionReady(pod1, corev1.ConditionTrue, fakeClient) + } + + expectedPolicy1, expectedRefs1 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTarget2WithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + expectedPolicy2, expectedRefs2 = expectedPolicyStateAndRefs( + []*namespaceWithPods{namespaceTargetWithPod}, + nil, + []*namespaceWithPods{namespaceGWWith2Pods}, false) + + eventuallyExpectNumberOfPolicies(2) + eventuallyExpectConfig(dynamicPolicy.Name, expectedPolicy1, expectedRefs1) + eventuallyExpectConfig(dynamicPolicyDiffTargetNS.Name, expectedPolicy2, expectedRefs2) + }, + Entry("Gateway pod in terminating state", true), + Entry("Gateway pod in not ready state", false), + ) + }) }) func deletePod(pod *corev1.Pod, fakeClient *fake.Clientset) { @@ -478,6 +635,36 @@ func updatePodStatus(pod *corev1.Pod, podStatus corev1.PodStatus) { Expect(err).NotTo(HaveOccurred()) } +func setPodDeletionTimestamp(pod *corev1.Pod, deletionTimestamp *metav1.Time, fakeClient *fake.Clientset) { + p, err := fakeClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + incrementResourceVersion(p) + p.DeletionTimestamp = deletionTimestamp + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Update(context.Background(), p, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) +} + +func setPodConditionReady(pod *corev1.Pod, condStatus corev1.ConditionStatus, fakeClient *fake.Clientset) { + p, err := fakeClient.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + incrementResourceVersion(p) + if p.Status.Conditions != nil { + for i := range p.Status.Conditions { + if p.Status.Conditions[i].Type == corev1.PodReady { + p.Status.Conditions[i].Status = condStatus + } + } + } else { + notReadyCondition := corev1.PodCondition{ + Type: corev1.PodReady, 
+ Status: corev1.ConditionFalse, + } + p.Status.Conditions = []corev1.PodCondition{notReadyCondition} + } + _, err = fakeClient.CoreV1().Pods(pod.Namespace).Update(context.Background(), p, metav1.UpdateOptions{}) + Expect(err).NotTo(HaveOccurred()) +} + func incrementResourceVersion(obj metav1.Object) { var rs int64 if obj.GetResourceVersion() != "" { diff --git a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go index 802f31e7bb..266312ce2c 100644 --- a/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go +++ b/go-controller/pkg/ovn/controller/apbroute/external_controller_policy_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" @@ -40,8 +40,16 @@ func newPodWithPhaseAndIP(podName, namespace string, phase corev1.PodPhase, podI p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: namespace, Labels: labels}, - Spec: corev1.PodSpec{NodeName: "node"}, - Status: corev1.PodStatus{Phase: phase}, + Spec: corev1.PodSpec{NodeName: "node"}, + Status: corev1.PodStatus{ + Phase: phase, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, } if len(podIP) > 0 { p.Annotations = map[string]string{nettypes.NetworkStatusAnnot: fmt.Sprintf(network_status, podIP)} diff --git a/go-controller/pkg/ovn/controller/apbroute/master_controller.go b/go-controller/pkg/ovn/controller/apbroute/master_controller.go index 324da524bd..82549c35dd 100644 --- a/go-controller/pkg/ovn/controller/apbroute/master_controller.go +++ 
b/go-controller/pkg/ovn/controller/apbroute/master_controller.go @@ -14,7 +14,7 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteapply "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/applyconfiguration/adminpolicybasedroute/v1" diff --git a/go-controller/pkg/ovn/controller/apbroute/network_client.go b/go-controller/pkg/ovn/controller/apbroute/network_client.go index 2f3de1da3a..5faca37f55 100644 --- a/go-controller/pkg/ovn/controller/apbroute/network_client.go +++ b/go-controller/pkg/ovn/controller/apbroute/network_client.go @@ -14,8 +14,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedroutelisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/listers/adminpolicybasedroute/v1" diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go index 5f19a1001b..b9c68eb594 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone.go @@ -23,8 +23,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" 
egressserviceapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1" @@ -50,7 +50,7 @@ type InitClusterEgressPoliciesFunc func(client libovsdbclient.Client, addressSet type EnsureNoRerouteNodePoliciesFunc func(client libovsdbclient.Client, addressSetFactory addressset.AddressSetFactory, networkName, controllerName, clusterRouter string, nodeLister corelisters.NodeLister, v4, v6 bool) error type DeleteLegacyDefaultNoRerouteNodePoliciesFunc func(nbClient libovsdbclient.Client, clusterRouter, nodeName string) error -type CreateDefaultRouteToExternalFunc func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry) error +type CreateDefaultRouteToExternalFunc func(nbClient libovsdbclient.Client, clusterRouter, gwRouterName string, clusterSubnets []config.CIDRNetworkEntry, gatewayIPs []*net.IPNet) error type Controller struct { // network information diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go index b3a5f6c103..94db811bb2 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_node.go @@ -151,9 +151,15 @@ func (c *Controller) syncNode(key string) error { return nil } + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(n, types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network gateway router join IPs for node %q: %w", n.Name, err) + } + // At this point the node exists and is ready if config.OVNKubernetesFeature.EnableInterconnect && c.zone != types.OvnDefaultZone && c.isNodeInLocalZone(n) { - if err := c.createDefaultRouteToExternalForIC(c.nbClient, c.GetNetworkScopedClusterRouterName(), c.GetNetworkScopedGWRouterName(nodeName), c.Subnets()); err != nil { + if err := c.createDefaultRouteToExternalForIC(c.nbClient, 
c.GetNetworkScopedClusterRouterName(), + c.GetNetworkScopedGWRouterName(nodeName), c.Subnets(), gatewayIPs); err != nil { return err } } diff --git a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go index e19a3cd77f..192204171b 100644 --- a/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go +++ b/go-controller/pkg/ovn/controller/egressservice/egressservice_zone_service.go @@ -13,7 +13,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/network_qos/metrics.go b/go-controller/pkg/ovn/controller/network_qos/metrics.go index 96fa30834d..de5c6872ca 100644 --- a/go-controller/pkg/ovn/controller/network_qos/metrics.go +++ b/go-controller/pkg/ovn/controller/network_qos/metrics.go @@ -3,15 +3,15 @@ package networkqos import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" ) // Metrics to be exposed var ( nqosCount = prometheus.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "num_network_qoses", Help: "The total number of network qoses in the cluster", }, @@ -20,8 +20,8 @@ var ( nqosOvnOperationDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: 
"nqos_ovn_operation_duration_ms", Help: "Time spent on reconciling a NetworkQoS event", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -31,8 +31,8 @@ var ( nqosReconcileDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_creation_duration_ms", Help: "Time spent on reconciling a NetworkQoS event", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -42,8 +42,8 @@ var ( nqosPodReconcileDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_deletion_duration_ms", Help: "Time spent on reconciling a Pod event", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -53,8 +53,8 @@ var ( nqosNamespaceReconcileDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_ns_reconcile_duration_ms", Help: "Time spent on reconciling Namespace change for all Pods related to NetworkQoSes", Buckets: prometheus.ExponentialBuckets(.1, 2, 15), @@ -64,8 +64,8 @@ var ( nqosStatusPatchDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: metrics.MetricOvnkubeNamespace, - Subsystem: metrics.MetricOvnkubeSubsystemController, + Namespace: types.MetricOvnkubeNamespace, + Subsystem: types.MetricOvnkubeSubsystemController, Name: "nqos_status_patch_duration_ms", Help: "Time spent on patching the status of a NetworkQoS", }, diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go 
b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go index 15511e35d8..3a75fc0f30 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_controller.go @@ -21,7 +21,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" networkqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" networkqosclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1/apis/clientset/versioned" diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go index 82eed9b07e..febc4d1953 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_ovnnb.go @@ -6,8 +6,8 @@ import ( "slices" "strconv" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go index fd92922479..4d771825ed 100644 --- a/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go +++ b/go-controller/pkg/ovn/controller/network_qos/network_qos_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" nqostype 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/networkqos/v1alpha1" diff --git a/go-controller/pkg/ovn/controller/services/lb_config.go b/go-controller/pkg/ovn/controller/services/lb_config.go index 2c47b1092b..b6bbd833ba 100644 --- a/go-controller/pkg/ovn/controller/services/lb_config.go +++ b/go-controller/pkg/ovn/controller/services/lb_config.go @@ -120,10 +120,16 @@ func makeNodeRouterTargetIPs(service *corev1.Service, node *nodeInfo, c *lbConfi } // OCP HACK END + // TODO: For all scenarios the lbAddress should be set to hostAddressesStr but this is breaking CI needs more investigation + lbAddresses := node.hostAddressesStr() + if config.OvnKubeNode.Mode == types.NodeModeFull { + lbAddresses = node.l3gatewayAddressesStr() + } + // Any targets local to the node need to have a special // harpin IP added, but only for the router LB - targetIPsV4, v4Updated := util.UpdateIPsSlice(targetIPsV4, node.l3gatewayAddressesStr(), []string{hostMasqueradeIPV4}) - targetIPsV6, v6Updated := util.UpdateIPsSlice(targetIPsV6, node.l3gatewayAddressesStr(), []string{hostMasqueradeIPV6}) + targetIPsV4, v4Updated := util.UpdateIPsSlice(targetIPsV4, lbAddresses, []string{hostMasqueradeIPV4}) + targetIPsV6, v6Updated := util.UpdateIPsSlice(targetIPsV6, lbAddresses, []string{hostMasqueradeIPV6}) // Local endpoints are a subset of cluster endpoints, so it is enough to compare their length v4Changed = len(targetIPsV4) != len(c.clusterEndpoints.V4IPs) || v4Updated diff --git a/go-controller/pkg/ovn/controller/services/loadbalancer.go b/go-controller/pkg/ovn/controller/services/loadbalancer.go index ba4eebc43a..025bb80d95 100644 --- a/go-controller/pkg/ovn/controller/services/loadbalancer.go +++ b/go-controller/pkg/ovn/controller/services/loadbalancer.go @@ -10,11 +10,11 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/apis/core" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" 
"github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" @@ -283,7 +283,7 @@ func EnsureLBs(nbClient libovsdbclient.Client, service *corev1.Service, existing return err } - recordOps, txOkCallBack, _, err := metrics.GetConfigDurationRecorder().AddOVN(nbClient, "service", + recordOps, txOkCallBack, _, err := recorders.GetConfigDurationRecorder().AddOVN(nbClient, "service", service.Namespace, service.Name) if err != nil { klog.Errorf("Failed to record config duration: %v", err) diff --git a/go-controller/pkg/ovn/controller/services/node_tracker.go b/go-controller/pkg/ovn/controller/services/node_tracker.go index 0ee0997eda..9fecf577c1 100644 --- a/go-controller/pkg/ovn/controller/services/node_tracker.go +++ b/go-controller/pkg/ovn/controller/services/node_tracker.go @@ -119,7 +119,7 @@ func (nt *nodeTracker) Start(nodeInformer coreinformers.NodeInformer) (cache.Res // - node changes its zone // - node becomes a hybrid overlay node from a ovn node or vice verse // . No need to trigger update for any other field change. 
- if util.NodeSubnetAnnotationChanged(oldObj, newObj) || + if util.NodeSubnetAnnotationChangedForNetwork(oldObj, newObj, nt.netInfo.GetNetworkName()) || util.NodeL3GatewayAnnotationChanged(oldObj, newObj) || oldObj.Name != newObj.Name || util.NodeHostCIDRsAnnotationChanged(oldObj, newObj) || @@ -169,7 +169,7 @@ func (nt *nodeTracker) updateNodeInfo(nodeName, switchName, routerName, chassisI ni.podSubnets = append(ni.podSubnets, *podSubnets[i]) // de-pointer } - klog.Infof("Node %s switch + router changed, syncing services", nodeName) + klog.Infof("Node %s switch + router changed, syncing services in network %q", nodeName, nt.netInfo.GetNetworkName()) nt.Lock() defer nt.Unlock() @@ -208,7 +208,7 @@ func (nt *nodeTracker) removeNode(nodeName string) { // The switch exists when the HostSubnet annotation is set. // The gateway router will exist sometime after the L3Gateway annotation is set. func (nt *nodeTracker) updateNode(node *corev1.Node) { - klog.V(2).Infof("Processing possible switch / router updates for node %s", node.Name) + klog.V(2).Infof("Processing possible switch / router updates for node %s in network %q", node.Name, nt.netInfo.GetNetworkName()) var hsn []*net.IPNet var err error if nt.netInfo.TopologyType() == types.Layer2Topology { diff --git a/go-controller/pkg/ovn/controller/services/repair.go b/go-controller/pkg/ovn/controller/services/repair.go index a9d37389fa..169bc64069 100644 --- a/go-controller/pkg/ovn/controller/services/repair.go +++ b/go-controller/pkg/ovn/controller/services/repair.go @@ -11,8 +11,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git 
a/go-controller/pkg/ovn/controller/services/services_controller.go b/go-controller/pkg/ovn/controller/services/services_controller.go index 031a38a0d6..71528380e0 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller.go +++ b/go-controller/pkg/ovn/controller/services/services_controller.go @@ -28,14 +28,15 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" globalconfig "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -167,6 +168,11 @@ type Controller struct { useTemplates bool netInfo util.NetInfo + + // handlers stored for shutdown + nodeHandler cache.ResourceEventHandlerRegistration + svcHandler cache.ResourceEventHandlerRegistration + endpointHandler cache.ResourceEventHandlerRegistration } // Run will not return until stopCh is closed. 
workers determines how many @@ -179,15 +185,15 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup // wait until we're told to stop <-stopCh - klog.Infof("Shutting down controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) - c.queue.ShutDown() + c.Cleanup() }() c.useLBGroups = useLBGroups c.useTemplates = useTemplates klog.Infof("Starting controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) - nodeHandler, err := c.nodeTracker.Start(c.nodeInformer) + var err error + c.nodeHandler, err = c.nodeTracker.Start(c.nodeInformer) if err != nil { return err } @@ -196,12 +202,12 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup c.startupDoneLock.Lock() c.startupDone = false c.startupDoneLock.Unlock() - if !util.WaitForHandlerSyncWithTimeout(nodeControllerName, stopCh, types.HandlerSyncTimeout, nodeHandler.HasSynced) { + if !util.WaitForHandlerSyncWithTimeout(nodeControllerName, stopCh, types.HandlerSyncTimeout, c.nodeHandler.HasSynced) { return fmt.Errorf("error syncing node tracker handler") } klog.Infof("Setting up event handlers for services for network=%s", c.netInfo.GetNetworkName()) - svcHandler, err := c.serviceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ + c.svcHandler, err = c.serviceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace(cache.ResourceEventHandlerFuncs{ AddFunc: c.onServiceAdd, UpdateFunc: c.onServiceUpdate, DeleteFunc: c.onServiceDelete, @@ -211,7 +217,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup } klog.Infof("Setting up event handlers for endpoint slices for network=%s", c.netInfo.GetNetworkName()) - endpointHandler, err := c.endpointSliceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace( + c.endpointHandler, err = 
c.endpointSliceInformer.Informer().AddEventHandler(factory.WithUpdateHandlingForObjReplace( // Filter out endpointslices that don't belong to this network (i.e. keep only kube-generated endpointslices if // on default network, keep only mirrored endpointslices for this network if on UDN) util.GetEndpointSlicesEventHandlerForNetwork( @@ -226,7 +232,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup } klog.Infof("Waiting for service and endpoint handlers to sync for network=%s", c.netInfo.GetNetworkName()) - if !util.WaitForHandlerSyncWithTimeout(controllerName, stopCh, types.HandlerSyncTimeout, svcHandler.HasSynced, endpointHandler.HasSynced) { + if !util.WaitForHandlerSyncWithTimeout(controllerName, stopCh, types.HandlerSyncTimeout, c.svcHandler.HasSynced, c.endpointHandler.HasSynced) { return fmt.Errorf("error syncing service and endpoint handlers") } @@ -254,6 +260,27 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}, wg *sync.WaitGroup return nil } +func (c *Controller) Cleanup() { + klog.Infof("Shutting down controller %s for network=%s", controllerName, c.netInfo.GetNetworkName()) + c.queue.ShutDown() + + if c.nodeHandler != nil { + if err := c.nodeInformer.Informer().RemoveEventHandler(c.nodeHandler); err != nil { + klog.Errorf("Failed to remove node handler for network %s: %v", c.netInfo.GetNetworkName(), err) + } + } + if c.svcHandler != nil { + if err := c.serviceInformer.Informer().RemoveEventHandler(c.svcHandler); err != nil { + klog.Errorf("Failed to remove service handler for network %s: %v", c.netInfo.GetNetworkName(), err) + } + } + if c.endpointHandler != nil { + if err := c.endpointSliceInformer.Informer().RemoveEventHandler(c.endpointHandler); err != nil { + klog.Errorf("Failed to remove endpoint handler for network %s: %v", c.netInfo.GetNetworkName(), err) + } + } +} + // worker runs a worker thread that just dequeues items, processes them, and // marks them done. 
You may run as many of these in parallel as you wish; the // workqueue guarantees that they will not end up processing the same service @@ -282,7 +309,7 @@ func (c *Controller) handleErr(err error, key string) { klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key) } if err == nil { - metrics.GetConfigDurationRecorder().End("service", ns, name) + recorders.GetConfigDurationRecorder().End("service", ns, name) c.queue.Forget(key) return } @@ -296,7 +323,7 @@ func (c *Controller) handleErr(err error, key string) { } klog.Warningf("Dropping service %q out of the queue for network=%s: %v", key, c.netInfo.GetNetworkName(), err) - metrics.GetConfigDurationRecorder().End("service", ns, name) + recorders.GetConfigDurationRecorder().End("service", ns, name) c.queue.Forget(key) utilruntime.HandleError(err) } @@ -609,7 +636,7 @@ func (c *Controller) onServiceAdd(obj interface{}) { if c.skipService(service.Name, service.Namespace) { return } - metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) + recorders.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) klog.V(5).Infof("Adding service %s for network=%s", key, c.netInfo.GetNetworkName()) c.queue.Add(key) } @@ -631,7 +658,7 @@ func (c *Controller) onServiceUpdate(oldObj, newObj interface{}) { return } - metrics.GetConfigDurationRecorder().Start("service", newService.Namespace, newService.Name) + recorders.GetConfigDurationRecorder().Start("service", newService.Namespace, newService.Name) c.queue.Add(key) } } @@ -651,7 +678,7 @@ func (c *Controller) onServiceDelete(obj interface{}) { klog.V(4).Infof("Deleting service %s for network=%s", key, c.netInfo.GetNetworkName()) - metrics.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) + recorders.GetConfigDurationRecorder().Start("service", service.Namespace, service.Name) c.queue.Add(key) } diff --git a/go-controller/pkg/ovn/controller/services/services_controller_test.go 
b/go-controller/pkg/ovn/controller/services/services_controller_test.go index 6937f6beca..777d175628 100644 --- a/go-controller/pkg/ovn/controller/services/services_controller_test.go +++ b/go-controller/pkg/ovn/controller/services/services_controller_test.go @@ -21,7 +21,7 @@ import ( utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" diff --git a/go-controller/pkg/ovn/controller/services/svc_template_var.go b/go-controller/pkg/ovn/controller/services/svc_template_var.go index 2ffcd03cc7..8cf4ee640b 100644 --- a/go-controller/pkg/ovn/controller/services/svc_template_var.go +++ b/go-controller/pkg/ovn/controller/services/svc_template_var.go @@ -8,8 +8,8 @@ import ( corev1 "k8s.io/api/core/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go index 59ac681e07..c96bc3a36d 100644 --- a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go +++ b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc.go @@ -17,7 +17,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git 
a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go index fd0e09545e..7d3cd9e72f 100644 --- a/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go +++ b/go-controller/pkg/ovn/controller/udnenabledsvc/udn_enabled_svc_test.go @@ -15,7 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/controller/unidling/unidle.go b/go-controller/pkg/ovn/controller/unidling/unidle.go index bcad0edf4c..d3c65e10fa 100644 --- a/go-controller/pkg/ovn/controller/unidling/unidle.go +++ b/go-controller/pkg/ovn/controller/unidling/unidle.go @@ -12,10 +12,10 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - libovsdbcache "github.com/ovn-org/libovsdb/cache" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbcache "github.com/ovn-kubernetes/libovsdb/cache" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" diff --git a/go-controller/pkg/ovn/controller/unidling/unidle_test.go b/go-controller/pkg/ovn/controller/unidling/unidle_test.go index 039968d696..3317b65c00 100644 --- a/go-controller/pkg/ovn/controller/unidling/unidle_test.go +++ b/go-controller/pkg/ovn/controller/unidling/unidle_test.go @@ -13,7 +13,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" diff --git a/go-controller/pkg/ovn/copp.go b/go-controller/pkg/ovn/copp.go index 4afc0bba76..39f2f092d4 100644 --- a/go-controller/pkg/ovn/copp.go +++ b/go-controller/pkg/ovn/copp.go @@ -3,7 +3,7 @@ package ovn import ( "fmt" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/default_network_controller.go b/go-controller/pkg/ovn/default_network_controller.go index cf6886e846..26ad651206 100644 --- a/go-controller/pkg/ovn/default_network_controller.go +++ b/go-controller/pkg/ovn/default_network_controller.go @@ -15,6 +15,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" egressfirewall "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1" @@ -22,6 +23,7 @@ import ( egressqoslisters "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/listers/egressqos/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/observability" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" @@ -129,6 +131,7 @@ type DefaultNetworkController struct { syncZoneICFailed sync.Map syncHostNetAddrSetFailed sync.Map syncEIPNodeRerouteFailed 
sync.Map + syncEIPNodeFailed sync.Map // variable to determine if all pods present on the node during startup have been processed // updated atomically @@ -356,14 +359,14 @@ func (oc *DefaultNetworkController) Stop() { // // If true, then either quit or perform a complete reconfiguration of the cluster (recreate switches/routers with new subnet values) func (oc *DefaultNetworkController) init() error { - existingNodes, err := oc.kube.GetNodes() + existingNodes, err := oc.watchFactory.GetNodes() if err != nil { klog.Errorf("Error in fetching nodes: %v", err) return err } klog.V(5).Infof("Existing number of nodes: %d", len(existingNodes)) - // FIXME: When https://github.com/ovn-org/libovsdb/issues/235 is fixed, + // FIXME: When https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed, // use IsTableSupported(nbdb.LoadBalancerGroup). if _, _, err := util.RunOVNNbctl("--columns=_uuid", "list", "Load_Balancer_Group"); err != nil { klog.Warningf("Load Balancer Group support enabled, however version of OVN in use does not support Load Balancer Groups.") @@ -657,11 +660,11 @@ func (h *defaultNetworkControllerEventHandler) RecordAddEvent(obj interface{}) { pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording add event on pod %s/%s", pod.Namespace, pod.Name) h.oc.podRecorder.AddPod(pod.UID) - metrics.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording add event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) } } @@ -671,11 +674,11 @@ func (h *defaultNetworkControllerEventHandler) RecordUpdateEvent(obj interface{} case factory.PodType: pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording update event on pod %s/%s", pod.Namespace, 
pod.Name) - metrics.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording update event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) } } @@ -686,11 +689,11 @@ func (h *defaultNetworkControllerEventHandler) RecordDeleteEvent(obj interface{} pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording delete event on pod %s/%s", pod.Namespace, pod.Name) h.oc.podRecorder.CleanPod(pod.UID) - metrics.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().Start("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording delete event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().Start("networkpolicy", np.Namespace, np.Name) } } @@ -700,11 +703,11 @@ func (h *defaultNetworkControllerEventHandler) RecordSuccessEvent(obj interface{ case factory.PodType: pod := obj.(*corev1.Pod) klog.V(5).Infof("Recording success event on pod %s/%s", pod.Namespace, pod.Name) - metrics.GetConfigDurationRecorder().End("pod", pod.Namespace, pod.Name) + recorders.GetConfigDurationRecorder().End("pod", pod.Namespace, pod.Name) case factory.PolicyType: np := obj.(*knet.NetworkPolicy) klog.V(5).Infof("Recording success event on network policy %s/%s", np.Namespace, np.Name) - metrics.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) + recorders.GetConfigDurationRecorder().End("networkpolicy", np.Namespace, np.Name) } } @@ -761,6 +764,16 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, 
from var aggregatedErrors []error if h.oc.isLocalZoneNode(node) { var nodeParams *nodeSyncs + hoNeedsCleanup := false + if !config.HybridOverlay.Enabled { + // check if the node has the stale annotations on it to signal that we need to clean up + if _, exists := node.Annotations[hotypes.HybridOverlayDRIP]; exists { + hoNeedsCleanup = true + } + if _, exist := node.Annotations[hotypes.HybridOverlayDRMAC]; exist { + hoNeedsCleanup = true + } + } if fromRetryLoop { _, nodeSync := h.oc.addNodeFailed.Load(node.Name) _, clusterRtrSync := h.oc.nodeClusterRouterPortFailed.Load(node.Name) @@ -773,7 +786,7 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from syncClusterRouterPort: clusterRtrSync, syncMgmtPort: mgmtSync, syncGw: gwSync, - syncHo: hoSync, + syncHo: hoSync || hoNeedsCleanup, syncZoneIC: zoneICSync} } else { nodeParams = &nodeSyncs{ @@ -781,10 +794,9 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from syncClusterRouterPort: true, syncMgmtPort: true, syncGw: true, - syncHo: config.HybridOverlay.Enabled, + syncHo: config.HybridOverlay.Enabled || hoNeedsCleanup, syncZoneIC: config.OVNKubernetesFeature.EnableInterconnect} } - if err = h.oc.addUpdateLocalNodeEvent(node, nodeParams); err != nil { klog.Infof("Node add failed for %s, will try again later: %v", node.Name, err) @@ -832,8 +844,10 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from h.oc.eIPC.nodeZoneState.UnlockKey(node.Name) shouldSyncReroute := true + shouldSyncEIPNode := true if fromRetryLoop { _, shouldSyncReroute = h.oc.syncEIPNodeRerouteFailed.Load(node.Name) + _, shouldSyncEIPNode = h.oc.syncEIPNodeFailed.Load(node.Name) } if shouldSyncReroute { @@ -851,10 +865,19 @@ func (h *defaultNetworkControllerEventHandler) AddResource(obj interface{}, from h.oc.syncEIPNodeRerouteFailed.Store(node.Name, true) return err } + h.oc.syncEIPNodeRerouteFailed.Delete(node.Name) + } + if shouldSyncEIPNode { + // Add routing 
specific to Egress IP NOTE: GARP configuration that + // Egress IP depends on is added from the gateway reconciliation logic + err := h.oc.eIPC.addEgressNode(node) + if err != nil { + h.oc.syncEIPNodeFailed.Store(node.Name, true) + return err + } + h.oc.syncEIPNodeFailed.Delete(node.Name) } - // Add routing specific to Egress IP NOTE: GARP configuration that - // Egress IP depends on is added from the gateway reconciliation logic - return h.oc.eIPC.addEgressNode(node) + return nil case factory.NamespaceType: ns, ok := obj.(*corev1.Namespace) @@ -926,6 +949,7 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int zoneClusterChanged := h.oc.nodeZoneClusterChanged(oldNode, newNode, newNodeIsLocalZoneNode, types.DefaultNetworkName) nodeSubnetChange := nodeSubnetChanged(oldNode, newNode, types.DefaultNetworkName) nodeEncapIPsChanged := util.NodeEncapIPsChanged(oldNode, newNode) + nodePrimaryDPUHostAddrChanged := util.NodePrimaryDPUHostAddrAnnotationChanged(oldNode, newNode) var aggregatedErrors []error if newNodeIsLocalZoneNode { @@ -940,6 +964,16 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int _, failed = h.oc.gatewaysFailed.Load(newNode.Name) gwSync := failed || gatewayChanged(oldNode, newNode) || nodeSubnetChange || hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) + hoNeedsCleanup := false + if !config.HybridOverlay.Enabled { + // check if the node has the stale annotations on it to signal that we need to clean up + if _, exists := newNode.Annotations[hotypes.HybridOverlayDRIP]; exists { + hoNeedsCleanup = true + } + if _, exist := newNode.Annotations[hotypes.HybridOverlayDRMAC]; exist { + hoNeedsCleanup = true + } + } _, hoSync := h.oc.hybridOverlayFailed.Load(newNode.Name) _, syncZoneIC := h.oc.syncZoneICFailed.Load(newNode.Name) syncZoneIC = syncZoneIC || zoneClusterChanged || primaryAddrChanged(oldNode, newNode) @@ -948,12 +982,12 @@ func (h 
*defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int syncClusterRouterPort: clusterRtrSync, syncMgmtPort: mgmtSync, syncGw: gwSync, - syncHo: hoSync, + syncHo: hoSync || hoNeedsCleanup, syncZoneIC: syncZoneIC, } } else { - klog.Infof("Node %s moved from the remote zone %s to local zone %s.", - newNode.Name, util.GetNodeZone(oldNode), util.GetNodeZone(newNode)) + klog.Infof("Node %s moved from the remote zone %s to local zone %s, in network: %q", + newNode.Name, util.GetNodeZone(oldNode), util.GetNodeZone(newNode), h.oc.GetNetworkName()) // The node is now a local zone node. Trigger a full node sync. nodeSyncsParam = &nodeSyncs{ syncNode: true, @@ -963,7 +997,6 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int syncHo: true, syncZoneIC: config.OVNKubernetesFeature.EnableInterconnect} } - if err := h.oc.addUpdateLocalNodeEvent(newNode, nodeSyncsParam); err != nil { aggregatedErrors = append(aggregatedErrors, err) } @@ -974,10 +1007,17 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int // Also check if node subnet changed, so static routes are properly set // Also check if the node is used to be a hybrid overlay node syncZoneIC = syncZoneIC || h.oc.isLocalZoneNode(oldNode) || nodeSubnetChange || zoneClusterChanged || - switchToOvnNode || nodeEncapIPsChanged + switchToOvnNode || nodeEncapIPsChanged || nodePrimaryDPUHostAddrChanged if syncZoneIC { - klog.Infof("Node %s in remote zone %s needs interconnect zone sync up. Zone cluster changed: %v", - newNode.Name, util.GetNodeZone(newNode), zoneClusterChanged) + klog.Infof("Node %q in remote zone %q, network %q, needs interconnect zone sync up. Zone cluster changed: %v", + newNode.Name, util.GetNodeZone(newNode), h.oc.GetNetworkName(), zoneClusterChanged) + } + // Reprovisioning the DPU (including OVS), which is pinned to a host, will change the system ID but not the node. 
+ if config.OvnKubeNode.Mode == types.NodeModeDPU && nodeChassisChanged(oldNode, newNode) { + if err := h.oc.zoneChassisHandler.DeleteRemoteZoneNode(oldNode); err != nil { + aggregatedErrors = append(aggregatedErrors, err) + } + syncZoneIC = true } if err := h.oc.addUpdateRemoteNodeEvent(newNode, syncZoneIC); err != nil { aggregatedErrors = append(aggregatedErrors, err) @@ -1018,24 +1058,36 @@ func (h *defaultNetworkControllerEventHandler) UpdateResource(oldObj, newObj int h.oc.eIPC.nodeZoneState.Store(newNode.Name, h.oc.isLocalZoneNode(newNode)) h.oc.eIPC.nodeZoneState.UnlockKey(newNode.Name) - _, failed := h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) + _, syncEIPNodeRerouteFailed := h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) // node moved from remote -> local or previously failed reroute config - if (!h.oc.isLocalZoneNode(oldNode) || failed) && h.oc.isLocalZoneNode(newNode) { + if (!h.oc.isLocalZoneNode(oldNode) || syncEIPNodeRerouteFailed) && h.oc.isLocalZoneNode(newNode) { if err := h.oc.eIPC.ensureDefaultNoRerouteQoSRules(newNode.Name); err != nil { return err } } // update the nodeIP in the default-reRoute (102 priority) destination address-set - if failed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { + if syncEIPNodeRerouteFailed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) { klog.Infof("Egress IP detected IP address change for node %s. 
Updating no re-route policies", newNode.Name) err := h.oc.eIPC.ensureDefaultNoRerouteNodePolicies() if err != nil { + h.oc.syncEIPNodeRerouteFailed.Store(newNode.Name, true) return err } + h.oc.syncEIPNodeRerouteFailed.Delete(newNode.Name) } - h.oc.syncEIPNodeRerouteFailed.Delete(newNode.Name) - return h.oc.eIPC.addEgressNode(newNode) + + _, syncEIPNodeFailed := h.oc.syncEIPNodeFailed.Load(newNode.Name) + // update only if the GR join IP changed for default network + if syncEIPNodeFailed || joinCIDRChanged(oldNode, newNode, h.oc.GetNetworkName()) { + err := h.oc.eIPC.addEgressNode(newNode) + if err != nil { + h.oc.syncEIPNodeFailed.Store(newNode.Name, true) + return err + } + h.oc.syncEIPNodeFailed.Delete(newNode.Name) + } + return nil case factory.NamespaceType: oldNs, newNs := oldObj.(*corev1.Namespace), newObj.(*corev1.Namespace) @@ -1098,6 +1150,7 @@ func (h *defaultNetworkControllerEventHandler) DeleteResource(obj, cachedObj int h.oc.eIPC.nodeZoneState.Delete(node.Name) h.oc.eIPC.nodeZoneState.UnlockKey(node.Name) h.oc.syncEIPNodeRerouteFailed.Delete(node.Name) + h.oc.syncEIPNodeFailed.Delete(node.Name) return nil case factory.NamespaceType: diff --git a/go-controller/pkg/ovn/dns_name_resolver/dns.go b/go-controller/pkg/ovn/dns_name_resolver/dns.go index f2ae3ddc2e..17b6b76471 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/dns.go +++ b/go-controller/pkg/ovn/dns_name_resolver/dns.go @@ -9,7 +9,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go b/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go index 0b8ad32f11..b0d29f0d3d 100644 --- 
a/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go +++ b/go-controller/pkg/ovn/dns_name_resolver/dns_name_resolver.go @@ -1,7 +1,7 @@ package dnsnameresolver import ( - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" ) diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns.go index 57bff7dec2..cd542c48e1 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns.go @@ -14,7 +14,7 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" egressfirewalllister "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/listers/egressfirewall/v1" diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go index 26e4107244..72661bd4ed 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go +++ b/go-controller/pkg/ovn/dns_name_resolver/external_dns_test.go @@ -13,7 +13,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" diff --git a/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go b/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go index 8ace7203e2..730bd026af 100644 --- a/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go +++ 
b/go-controller/pkg/ovn/dns_name_resolver/external_dns_tracker.go @@ -8,7 +8,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/egressfirewall.go b/go-controller/pkg/ovn/egressfirewall.go index 4e49505d04..5b06d9c0f3 100644 --- a/go-controller/pkg/ovn/egressfirewall.go +++ b/go-controller/pkg/ovn/egressfirewall.go @@ -20,7 +20,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" @@ -467,7 +467,7 @@ func (oc *DefaultNetworkController) addEgressFirewallRules(ef *egressFirewall, p func (oc *DefaultNetworkController) createEgressFirewallACLOps(ops []ovsdb.Operation, ruleIdx int, match, action, namespace, pgName string, aclLogging *libovsdbutil.ACLLoggingLevels) ([]ovsdb.Operation, error) { aclIDs := oc.getEgressFirewallACLDbIDs(namespace, ruleIdx) priority := types.EgressFirewallStartPriority - ruleIdx - egressFirewallACL := libovsdbutil.BuildACL( + egressFirewallACL := libovsdbutil.BuildACLWithDefaultTier( aclIDs, priority, match, diff --git a/go-controller/pkg/ovn/egressgw.go b/go-controller/pkg/ovn/egressgw.go index 27f9e2b970..d9d8610aba 100644 --- a/go-controller/pkg/ovn/egressgw.go +++ b/go-controller/pkg/ovn/egressgw.go @@ -15,10 +15,11 @@ import ( ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient 
"github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -49,6 +50,13 @@ func (oc *DefaultNetworkController) addPodExternalGW(pod *corev1.Pod) error { klog.Infof("External gateway pod: %s, detected for namespace(s) %s", pod.Name, podRoutingNamespaceAnno) + // If an external gateway pod is in terminating or not ready state then don't add the + // routes for the external gateway pod + if util.PodTerminating(pod) || !v1pod.IsPodReadyConditionTrue(pod.Status) { + klog.Warningf("External gateway pod cannot serve traffic; it's in terminating or not ready state: %s/%s", pod.Namespace, pod.Name) + return nil + } + foundGws, err := getExGwPodIPs(pod) if err != nil { klog.Errorf("Error getting exgw IPs for pod: %s, error: %v", pod.Name, err) @@ -581,7 +589,7 @@ func (oc *DefaultNetworkController) deletePodSNAT(nodeName string, extIPs, podIP return nil } // Default network does not set any matches in Pod SNAT - ops, err := deletePodSNATOps(oc.nbClient, nil, oc.GetNetworkScopedGWRouterName(nodeName), extIPs, podIPNets, "") + ops, err := deletePodSNATOps(oc.nbClient, nil, oc.GetNetworkScopedGWRouterName(nodeName), extIPs, podIPNets) if err != nil { return err } @@ -631,8 +639,8 @@ func getExternalIPsGR(watchFactory *factory.WatchFactory, nodeName string) ([]*n // deletePodSNATOps creates ovsdb operation that removes per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwRouterName string, extIPs, podIPNets []*net.IPNet, match string) ([]ovsdb.Operation, error) { - nats, err := buildPodSNAT(extIPs, podIPNets, match) +func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwRouterName string, extIPs, podIPNets []*net.IPNet) ([]ovsdb.Operation, error) { 
+ nats, err := buildPodSNAT(extIPs, podIPNets, "") // for delete, match is not needed - we try to cleanup all the SNATs that match the isEquivalentNAT predicate if err != nil { return nil, err } @@ -649,15 +657,12 @@ func deletePodSNATOps(nbClient libovsdbclient.Client, ops []ovsdb.Operation, gwR // addOrUpdatePodSNAT adds or updates per pod SNAT rules towards the nodeIP that are applied to the GR where the pod resides // used when disableSNATMultipleGWs=true func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet) error { - nats, err := buildPodSNAT(extIPs, podIfAddrs, "") + ops, err := addOrUpdatePodSNATOps(nbClient, gwRouterName, extIPs, podIfAddrs, "", nil) if err != nil { return err } - logicalRouter := nbdb.LogicalRouter{ - Name: gwRouterName, - } - if err := libovsdbops.CreateOrUpdateNATs(nbClient, &logicalRouter, nats...); err != nil { - return fmt.Errorf("failed to update SNAT for pods of router %s: %v", logicalRouter.Name, err) + if _, err = libovsdbops.TransactAndCheck(nbClient, ops); err != nil { + return fmt.Errorf("failed to update SNAT for pods of router %s: %v", gwRouterName, err) } return nil } @@ -665,14 +670,14 @@ func addOrUpdatePodSNAT(nbClient libovsdbclient.Client, gwRouterName string, ext // addOrUpdatePodSNATOps returns the operation that adds or updates per pod SNAT rules towards the nodeIP that are // applied to the GR where the pod resides // used when disableSNATMultipleGWs=true -func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, match string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { - router := &nbdb.LogicalRouter{Name: gwRouterName} - nats, err := buildPodSNAT(extIPs, podIfAddrs, match) +func addOrUpdatePodSNATOps(nbClient libovsdbclient.Client, gwRouterName string, extIPs, podIfAddrs []*net.IPNet, snatMatch string, ops []ovsdb.Operation) ([]ovsdb.Operation, error) { + gwRouter := &nbdb.LogicalRouter{Name: 
gwRouterName} + nats, err := buildPodSNAT(extIPs, podIfAddrs, snatMatch) if err != nil { return nil, err } - if ops, err = libovsdbops.CreateOrUpdateNATsOps(nbClient, ops, router, nats...); err != nil { - return nil, fmt.Errorf("failed to update SNAT for pods of router: %s, error: %v", gwRouterName, err) + if ops, err = libovsdbops.CreateOrUpdateNATsOps(nbClient, ops, gwRouter, nats...); err != nil { + return nil, fmt.Errorf("failed to create ops to update SNAT for pods of router: %s, error: %v", gwRouterName, err) } return ops, nil } diff --git a/go-controller/pkg/ovn/egressgw_test.go b/go-controller/pkg/ovn/egressgw_test.go index fff6c32fa3..420f2f26e1 100644 --- a/go-controller/pkg/ovn/egressgw_test.go +++ b/go-controller/pkg/ovn/egressgw_test.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "sync" + "time" nettypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" "github.com/onsi/ginkgo/v2" @@ -19,6 +20,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" @@ -133,8 +135,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -169,8 +171,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": 
"myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -273,8 +275,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -309,8 +311,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -417,8 +419,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -463,8 +465,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -895,8 +897,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -966,8 +968,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": 
"myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1076,8 +1078,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1116,8 +1118,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1237,8 +1239,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1277,8 +1279,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1408,8 +1410,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1448,8 +1450,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - 
"iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1589,8 +1591,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1629,8 +1631,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1662,8 +1664,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1708,8 +1710,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1742,8 +1744,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1782,8 +1784,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: 
map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1817,76 +1819,116 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, ), ) - }) - ginkgo.Context("on using bfd", func() { - ginkgo.It("should enable bfd only on the namespace gw when set", func() { - app.Action = func(*cli.Context) error { - - namespaceT := *newNamespace(namespaceName) - namespaceT.Annotations = map[string]string{"k8s.ovn.org/routing-external-gws": "9.0.0.1"} - namespaceT.Annotations["k8s.ovn.org/bfd-enabled"] = "" - namespaceX := *newNamespace("namespace2") + ginkgo.DescribeTable("reconciles a host networked pod in terminating or not ready state acting as a exgw for another namespace for existing pod", + func(bfd bool, + terminating bool, + beforeUpdateNB []libovsdbtest.TestData, + afterUpdateNB []libovsdbtest.TestData, + expectedNamespaceAnnotation string, + apbExternalRouteCRList *adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList) { + app.Action = func(*cli.Context) error { - t := newTPod( - "node1", - "10.128.1.0/24", - "10.128.1.2", - "10.128.1.1", - "myPod", - "10.128.1.3", - "0a:58:0a:80:01:03", - namespaceT.Name, - ) - gwPod := *newPod(namespaceX.Name, "gwPod", "node2", "10.0.0.1") - gwPod.Annotations = map[string]string{"k8s.ovn.org/routing-namespaces": namespaceT.Name} - gwPod.Spec.HostNetwork = true - fakeOvn.startWithDBSetup( - libovsdbtest.TestSetup{ - NBData: []libovsdbtest.TestData{ - &nbdb.LogicalSwitch{ - UUID: "node1", - Name: "node1", - }, - &nbdb.LogicalSwitch{ - UUID: "node2", - Name: "node2", - }, - &nbdb.LogicalRouter{ - UUID: "GR_node1-UUID", - Name: "GR_node1", + namespaceT := *newNamespace(namespaceName) + namespaceX := *newNamespace(namespace2Name) + t := newTPod( + "node1", + "10.128.1.0/24", + "10.128.1.2", + "10.128.1.1", + "myPod", + "10.128.1.3", + "0a:58:0a:80:01:03", + 
namespaceT.Name, + ) + gwPod := *newPod(namespaceX.Name, gwPodName, "node2", "9.0.0.1") + gwPod.Annotations = map[string]string{"k8s.ovn.org/routing-namespaces": namespaceT.Name} + if bfd { + gwPod.Annotations["k8s.ovn.org/bfd-enabled"] = "" + } + gwPod.Spec.HostNetwork = true + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + }, }, }, - }, - &corev1.NamespaceList{ - Items: []corev1.Namespace{ - namespaceT, + &corev1.NamespaceList{ + Items: []corev1.Namespace{ + namespaceT, namespaceX, + }, }, - }, - &corev1.NodeList{ - Items: []corev1.Node{ - *newNode("node1", "192.168.126.202/24"), - *newNode("node2", "192.168.126.50/24"), + &corev1.NodeList{ + Items: []corev1.Node{ + *newNode("node1", "192.168.126.202/24"), + *newNode("node2", "192.168.126.50/24"), + }, }, - }, - &corev1.PodList{ - Items: []corev1.Pod{ - *newPod(t.namespace, t.podName, t.nodeName, t.podIP), + &corev1.PodList{ + Items: []corev1.Pod{ + *newPod(t.namespace, t.podName, t.nodeName, t.podIP), + }, }, - }, - ) - t.populateLogicalSwitchCache(fakeOvn) - err := fakeOvn.controller.lsManager.AddOrUpdateSwitch("node2", []*net.IPNet{ovntest.MustParseIPNet("10.128.2.0/24")}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - injectNode(fakeOvn) - err = fakeOvn.controller.WatchNamespaces() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.controller.WatchPods() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + apbExternalRouteCRList, + ) + t.populateLogicalSwitchCache(fakeOvn) + err := fakeOvn.controller.lsManager.AddOrUpdateSwitch("node2", []*net.IPNet{ovntest.MustParseIPNet("10.128.2.0/24")}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + injectNode(fakeOvn) + err = fakeOvn.controller.WatchNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err 
= fakeOvn.controller.WatchPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + fakeOvn.RunAPBExternalPolicyController() - _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Create(context.TODO(), &gwPod, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Create(context.TODO(), &gwPod, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(beforeUpdateNB)) + gomega.Eventually(func() string { + return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] + }).Should(gomega.Equal("9.0.0.1")) - finalNB := []libovsdbtest.TestData{ + if terminating { + ginkgo.By("Setting deletion timestamp for the ex gw pod") + gwPod.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(1000 * time.Second)} + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Update(context.TODO(), &gwPod, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } else { + ginkgo.By("Updating the ex gw pod status to mark it as not ready") + notReadyCondition := corev1.PodCondition{ + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + } + gwPod.Status.Conditions = []corev1.PodCondition{notReadyCondition} + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).UpdateStatus(context.TODO(), &gwPod, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(afterUpdateNB)) + gomega.Eventually(func() string { + return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] + }).Should(gomega.Equal(expectedNamespaceAnnotation)) + for _, apbRoutePolicy := range apbExternalRouteCRList.Items { + checkAPBRouteStatus(fakeOvn, apbRoutePolicy.Name, false) + } + 
return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }, + ginkgo.Entry("No BFD with ex gw pod in terminating state", false, true, + []libovsdbtest.TestData{ &nbdb.LogicalSwitchPort{ UUID: "lsp1", Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, @@ -1910,26 +1952,84 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { UUID: "node2", Name: "node2", }, - &nbdb.BFD{ - UUID: bfd1NamedUUID, - DstIP: "9.0.0.1", - LogicalPort: "rtoe-GR_node1", - }, &nbdb.LogicalRouterStaticRoute{ UUID: "static-route-1-UUID", IPPrefix: "10.128.1.3/32", Nexthop: "9.0.0.1", - BFD: &bfd1NamedUUID, Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, OutputPort: &logicalRouterPort, Options: map[string]string{ "ecmp_symmetric_reply": "true", }, }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("No BFD with ex gw pod in not ready state", false, false, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + 
"iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, &nbdb.LogicalRouterStaticRoute{ - UUID: "static-route-2-UUID", + UUID: "static-route-1-UUID", IPPrefix: "10.128.1.3/32", - Nexthop: "10.0.0.1", + Nexthop: "9.0.0.1", Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, OutputPort: &logicalRouterPort, Options: map[string]string{ @@ -1939,16 +2039,503 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { &nbdb.LogicalRouter{ UUID: "GR_node1-UUID", Name: "GR_node1", - StaticRoutes: []string{"static-route-1-UUID", "static-route-2-UUID"}, + StaticRoutes: []string{"static-route-1-UUID"}, }, - } - gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(finalNB)) - gomega.Eventually(func() string { - return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] - }).Should(gomega.Equal("10.0.0.1")) - return nil - } - + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("BFD Enabled with ex gw pod in terminating state", true, true, []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: 
"lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.BFD{ + UUID: bfd1NamedUUID, + DstIP: "9.0.0.1", + LogicalPort: "rtoe-GR_node1", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + BFD: &bfd1NamedUUID, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("BFD Enabled with ex gw pod in not ready state", true, false, []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": 
namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.BFD{ + UUID: bfd1NamedUUID, + DstIP: "9.0.0.1", + LogicalPort: "rtoe-GR_node1", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + BFD: &bfd1NamedUUID, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{}, + ), + ginkgo.Entry("No BFD with ex gw pod in terminating state and with overlapping APB External Route CR and annotation", false, true, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: 
map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{ + Items: []adminpolicybasedrouteapi.AdminPolicyBasedExternalRoute{ + newPolicy("policy", + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespaceName}}, + nil, + false, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespace2Name}}, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": gwPodName}}, + false, + ""), + }, + }, + ), + ginkgo.Entry("No BFD with ex gw pod in not ready state and with overlapping APB External Route CR and annotation", false, false, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + 
Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID"}, + }, + }, + []libovsdbtest.TestData{ + &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + "requested-chassis": "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{}, + }, + }, + "", + &adminpolicybasedrouteapi.AdminPolicyBasedExternalRouteList{ + Items: []adminpolicybasedrouteapi.AdminPolicyBasedExternalRoute{ + newPolicy("policy", + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespaceName}}, + nil, + false, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": namespace2Name}}, + &metav1.LabelSelector{MatchLabels: map[string]string{"name": gwPodName}}, + false, + ""), + }, + }, + ), + ) + }) + 
ginkgo.Context("on using bfd", func() { + ginkgo.It("should enable bfd only on the namespace gw when set", func() { + app.Action = func(*cli.Context) error { + + namespaceT := *newNamespace(namespaceName) + namespaceT.Annotations = map[string]string{"k8s.ovn.org/routing-external-gws": "9.0.0.1"} + namespaceT.Annotations["k8s.ovn.org/bfd-enabled"] = "" + namespaceX := *newNamespace("namespace2") + + t := newTPod( + "node1", + "10.128.1.0/24", + "10.128.1.2", + "10.128.1.1", + "myPod", + "10.128.1.3", + "0a:58:0a:80:01:03", + namespaceT.Name, + ) + gwPod := *newPod(namespaceX.Name, "gwPod", "node2", "10.0.0.1") + gwPod.Annotations = map[string]string{"k8s.ovn.org/routing-namespaces": namespaceT.Name} + gwPod.Spec.HostNetwork = true + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + }, + }, + }, + &corev1.NamespaceList{ + Items: []corev1.Namespace{ + namespaceT, + }, + }, + &corev1.NodeList{ + Items: []corev1.Node{ + *newNode("node1", "192.168.126.202/24"), + *newNode("node2", "192.168.126.50/24"), + }, + }, + &corev1.PodList{ + Items: []corev1.Pod{ + *newPod(t.namespace, t.podName, t.nodeName, t.podIP), + }, + }, + ) + t.populateLogicalSwitchCache(fakeOvn) + err := fakeOvn.controller.lsManager.AddOrUpdateSwitch("node2", []*net.IPNet{ovntest.MustParseIPNet("10.128.2.0/24")}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + injectNode(fakeOvn) + err = fakeOvn.controller.WatchNamespaces() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = fakeOvn.controller.WatchPods() + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Pods(namespaceX.Name).Create(context.TODO(), &gwPod, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + finalNB := []libovsdbtest.TestData{ 
+ &nbdb.LogicalSwitchPort{ + UUID: "lsp1", + Addresses: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + ExternalIDs: map[string]string{ + "pod": "true", + "namespace": namespaceName, + }, + Name: "namespace1_myPod", + Options: map[string]string{ + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + }, + PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node1", + Name: "node1", + Ports: []string{"lsp1"}, + }, + &nbdb.LogicalSwitch{ + UUID: "node2", + Name: "node2", + }, + &nbdb.BFD{ + UUID: bfd1NamedUUID, + DstIP: "9.0.0.1", + LogicalPort: "rtoe-GR_node1", + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-1-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "9.0.0.1", + BFD: &bfd1NamedUUID, + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouterStaticRoute{ + UUID: "static-route-2-UUID", + IPPrefix: "10.128.1.3/32", + Nexthop: "10.0.0.1", + Policy: &nbdb.LogicalRouterStaticRoutePolicySrcIP, + OutputPort: &logicalRouterPort, + Options: map[string]string{ + "ecmp_symmetric_reply": "true", + }, + }, + &nbdb.LogicalRouter{ + UUID: "GR_node1-UUID", + Name: "GR_node1", + StaticRoutes: []string{"static-route-1-UUID", "static-route-2-UUID"}, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(finalNB)) + gomega.Eventually(func() string { + return getNamespaceAnnotations(fakeOvn.fakeClient.KubeClient, namespaceT.Name)[util.ExternalGatewayPodIPsAnnotation] + }).Should(gomega.Equal("10.0.0.1")) + return nil + } + err := app.Run([]string{app.Name}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -2030,8 +2617,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, 
PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2171,8 +2758,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2364,8 +2951,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "requested-chassis": "node1", - "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + "iface-id-ver": "myPod", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2546,8 +3133,8 @@ var _ = ginkgo.Describe("OVN Egress Gateway Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "requested-chassis": "node1", - "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + "iface-id-ver": "myPod", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index ed6adad9e9..ed018c0de3 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -26,8 +26,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -249,7 +249,7 @@ func NewEIPController( // CASE 3.4: Both Namespace && Pod Selectors on Spec changed // } // -// NOTE: `Spec.EgressIPs“ updates for EIP object are not processed here, that is the job of cluster manager +// NOTE: `Spec.EgressIPs" updates for EIP object are not processed here, that is the job of cluster manager // 
// We only care about `Spec.NamespaceSelector`, `Spec.PodSelector` and `Status` field func (e *EgressIPController) reconcileEgressIP(old, new *egressipv1.EgressIP) (err error) { @@ -2089,28 +2089,14 @@ func (e *EgressIPController) addEgressNode(node *corev1.Node) error { // NOTE3: When the node gets deleted we do not remove this route intentionally because // on IC if the node is gone, then the ovn_cluster_router is also gone along with all // the routes on it. - processNetworkFn := func(ni util.NetInfo) error { - if ni.TopologyType() == types.Layer2Topology || len(ni.Subnets()) == 0 { - return nil - } - if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(), - ni.GetNetworkScopedGWRouterName(node.Name), ni.Subnets()); err != nil { - return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) - } - return nil - } ni := e.networkManager.GetNetwork(types.DefaultNetworkName) - if ni == nil { - return fmt.Errorf("failed to get default network from NAD controller") - } - if err := processNetworkFn(ni); err != nil { - return fmt.Errorf("failed to process default network: %v", err) - } - if !isEgressIPForUDNSupported() { - return nil + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, types.DefaultNetworkName) + if err != nil { + return fmt.Errorf("failed to get default network gateway router join IPs for node %q: %w", node.Name, err) } - if err := e.networkManager.DoWithLock(processNetworkFn); err != nil { - return fmt.Errorf("failed to process all user defined networks route to external: %v", err) + if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, ni.GetNetworkScopedClusterRouterName(), + ni.GetNetworkScopedGWRouterName(node.Name), ni.Subnets(), gatewayIPs); err != nil { + return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) } } } @@ -2608,9 +2594,21 @@ func (e *EgressIPController) addExternalGWPodSNATOps(ni 
util.NetInfo, ops []ovsd if err != nil { return nil, err } - ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podIPs, "", ops) - if err != nil { - return nil, err + + // Handle each pod IP individually since each IP family needs its own SNAT match + for _, podIP := range podIPs { + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(podIP) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(e.nbClient, ni, pod.Spec.NodeName, util.IsPodNetworkAdvertisedAtNode(ni, pod.Spec.NodeName), ipFamily) + if err != nil { + return nil, fmt.Errorf("failed to get SNAT match for node %s for network %s: %w", pod.Spec.NodeName, ni.GetNetworkName(), err) + } + ops, err = addOrUpdatePodSNATOps(e.nbClient, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, []*net.IPNet{podIP}, snatMatch, ops) + if err != nil { + return nil, err + } } klog.V(5).Infof("Adding SNAT on %s since egress node managing %s/%s was the same: %s", pod.Spec.NodeName, pod.Namespace, pod.Name, status.Node) } @@ -2631,7 +2629,7 @@ func (e *EgressIPController) deleteExternalGWPodSNATOps(ni util.NetInfo, ops []o if err != nil { return nil, err } - ops, err = deletePodSNATOps(e.nbClient, ops, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, affectedIPs, "") + ops, err = deletePodSNATOps(e.nbClient, ops, ni.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, affectedIPs) if err != nil { return nil, err } @@ -3185,7 +3183,7 @@ func createDefaultNoRerouteServicePolicies(nbClient libovsdbclient.Client, netwo return nil } -func (e *EgressIPController) ensureRouterPoliciesForNetwork(ni util.NetInfo) error { +func (e *EgressIPController) ensureRouterPoliciesForNetwork(ni util.NetInfo, node *corev1.Node) error { e.nodeUpdateMutex.Lock() defer e.nodeUpdateMutex.Unlock() subnetEntries := ni.Subnets() @@ -3210,8 +3208,12 @@ func (e *EgressIPController) ensureRouterPoliciesForNetwork(ni util.NetInfo) err return 
fmt.Errorf("failed to ensure no reroute node policies for network %s: %v", ni.GetNetworkName(), err) } if config.OVNKubernetesFeature.EnableInterconnect && ni.TopologyType() == types.Layer3Topology { + gatewayIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, ni.GetNetworkName()) + if err != nil { + return fmt.Errorf("failed to get %q network gateway router join IPs for node %q, err: %w", ni.GetNetworkName(), node.Name, err) + } if err := libovsdbutil.CreateDefaultRouteToExternal(e.nbClient, routerName, - ni.GetNetworkScopedGWRouterName(localNode), subnetEntries); err != nil { + ni.GetNetworkScopedGWRouterName(localNode), subnetEntries, gatewayIPs); err != nil { return fmt.Errorf("failed to create route to external for network %s: %v", ni.GetNetworkName(), err) } } @@ -3649,12 +3651,12 @@ func (e *EgressIPController) createNATRuleOps(ni util.NetInfo, ops []ovsdb.Opera nats = append(nats, nat) } } - router := &nbdb.LogicalRouter{ + gwRouter := &nbdb.LogicalRouter{ Name: ni.GetNetworkScopedGWRouterName(status.Node), } - ops, err = libovsdbops.CreateOrUpdateNATsOps(e.nbClient, ops, router, nats...) + ops, err = libovsdbops.CreateOrUpdateNATsOps(e.nbClient, ops, gwRouter, nats...) 
if err != nil { - return nil, fmt.Errorf("unable to create snat rules, for router: %s, error: %v", router.Name, err) + return nil, fmt.Errorf("unable to create snat rules, for router: %s, error: %v", gwRouter.Name, err) } return ops, nil } diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index 8450c239d3..8cbac3665c 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -314,7 +314,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -428,7 +428,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -524,7 +524,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -632,7 +632,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": 
types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -756,7 +756,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -766,7 +766,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -919,7 +919,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -929,7 +929,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1076,7 +1076,7 @@ var _ = ginkgo.Describe("OVN master 
EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1086,7 +1086,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1096,7 +1096,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1302,7 +1302,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1312,7 +1312,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + 
libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1322,7 +1322,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1482,7 +1482,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1492,7 +1492,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1502,7 +1502,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1712,7 +1712,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" 
Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1722,7 +1722,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1732,7 +1732,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1894,7 +1894,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -1904,7 +1904,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + 
"GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2055,7 +2055,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2065,7 +2065,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2269,7 +2269,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2279,7 +2279,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2500,7 +2500,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + 
node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2510,7 +2510,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2721,7 +2721,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2731,7 +2731,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -2867,7 +2867,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", 
"exclude-lb-vips-from-garp": "true", }, @@ -2877,7 +2877,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3008,7 +3008,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3167,7 +3167,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3177,7 +3177,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3332,7 +3332,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: 
map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -3342,7 +3342,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5598,6 +5598,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.2/16\"}", // used only for ic=true test "k8s.ovn.org/zone-name": node1Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0]), } if node1Zone != "global" { annotations["k8s.ovn.org/remote-zone-migrated"] = node1Zone // used only for ic=true test @@ -5612,6 +5613,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "k8s.ovn.org/node-transit-switch-port-ifaddr": "{\"ipv4\":\"100.88.0.3/16\"}", // used only for ic=true test "k8s.ovn.org/zone-name": node2Zone, // used only for ic=true test util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0]), } if node2Zone != "global" { annotations["k8s.ovn.org/remote-zone-migrated"] = node2Zone // used only for ic=true test @@ -5688,7 +5690,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: 
map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5698,7 +5700,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5860,7 +5862,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -5870,7 +5872,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6025,7 +6027,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6035,7 
+6037,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6500,7 +6502,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6510,7 +6512,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6621,7 +6623,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6631,7 +6633,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": 
types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6708,7 +6710,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + nodeName, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6802,7 +6804,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + nodeName, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + nodeName, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -6913,7 +6915,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7036,7 +7038,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7072,8 +7074,8 @@ var _ = ginkgo.Describe("OVN master 
EgressIP Operations cluster default network" "namespace": egressPod1.Namespace, }, Options: map[string]string{ - "requested-chassis": egressPod1.Spec.NodeName, - "iface-id-ver": egressPod1.Name, + libovsdbops.RequestedChassis: egressPod1.Spec.NodeName, + "iface-id-ver": egressPod1.Name, }, PortSecurity: []string{podAddr}, } @@ -7134,7 +7136,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7269,7 +7271,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7389,7 +7391,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7425,8 +7427,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "namespace": egressPod1.Namespace, }, Options: map[string]string{ - "requested-chassis": egressPod1.Spec.NodeName, - "iface-id-ver": egressPod1.Name, + libovsdbops.RequestedChassis: egressPod1.Spec.NodeName, + "iface-id-ver": egressPod1.Name, }, PortSecurity: 
[]string{podAddr}, } @@ -7609,7 +7611,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7619,7 +7621,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -7852,8 +7854,8 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" "namespace": egressPod1.Namespace, }, Options: map[string]string{ - "requested-chassis": egressPod1.Spec.NodeName, - "iface-id-ver": egressPod1.Name, + libovsdbops.RequestedChassis: egressPod1.Spec.NodeName, + "iface-id-ver": egressPod1.Name, }, PortSecurity: []string{podAddr}, } @@ -8231,7 +8233,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8315,7 +8317,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + 
node.Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8400,7 +8402,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node.Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node.Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8487,7 +8489,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8497,7 +8499,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8582,7 +8584,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8592,7 +8594,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default 
network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8686,7 +8688,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8696,7 +8698,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8781,7 +8783,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8791,7 +8793,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: 
types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8866,7 +8868,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8876,7 +8878,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -8996,7 +8998,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9006,7 +9008,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9141,7 +9143,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: 
types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9151,7 +9153,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9269,7 +9271,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9353,7 +9355,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9435,7 +9437,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + 
node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9647,7 +9649,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9657,7 +9659,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9741,7 +9743,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9751,7 +9753,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9825,7 +9827,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + 
node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9835,7 +9837,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9919,7 +9921,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -9929,7 +9931,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10021,7 +10023,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", 
"exclude-lb-vips-from-garp": "true", }, @@ -10031,7 +10033,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10116,7 +10118,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10126,7 +10128,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10201,7 +10203,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10211,7 +10213,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: 
map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10312,7 +10314,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10322,7 +10324,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10419,7 +10421,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -10615,7 +10617,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ 
-10908,7 +10910,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11239,7 +11241,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11249,7 +11251,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11359,7 +11361,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11369,7 +11371,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": 
types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11491,7 +11493,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11501,7 +11503,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11681,7 +11683,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11691,7 +11693,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11829,7 +11831,7 @@ var _ = 
ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11839,7 +11841,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11954,7 +11956,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -11964,7 +11966,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12062,7 +12064,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Options: map[string]string{ "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + 
libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, }, }, &nbdb.LogicalSwitch{ @@ -12193,7 +12195,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12327,7 +12329,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Options: map[string]string{ "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, }, }, &nbdb.LogicalSwitchPort{ @@ -12335,7 +12337,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12345,7 +12347,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12477,7 +12479,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", 
Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12487,7 +12489,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12497,7 +12499,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12659,7 +12661,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12669,7 +12671,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, 
@@ -12679,7 +12681,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12845,7 +12847,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12855,7 +12857,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -12865,7 +12867,7 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations cluster default network" Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node3Name, Type: "router", Options: map[string]string{ - "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + "GR_" + node3Name, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, diff --git a/go-controller/pkg/ovn/egressip_udn_l2_test.go b/go-controller/pkg/ovn/egressip_udn_l2_test.go index de7431696b..c9080d6b71 100644 --- a/go-controller/pkg/ovn/egressip_udn_l2_test.go +++ 
b/go-controller/pkg/ovn/egressip_udn_l2_test.go @@ -304,7 +304,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) fakeOvn.controller.eIPC.zone = node1.Name fakeOvn.controller.zone = node1.Name - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -670,7 +670,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() // simulate Start() of secondary network controller - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo()) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo(), &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(secConInfo.bnc.GetNetInfo(), node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1662,7 +1662,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol defer fakeOvn.networkManager.Stop() err = fakeOvn.controller.WatchEgressNodes() gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2026,7 +2026,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer 
fakeOvn.networkManager.Stop() - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/egressip_udn_l3_test.go b/go-controller/pkg/ovn/egressip_udn_l3_test.go index 5dae356208..0d311a96d5 100644 --- a/go-controller/pkg/ovn/egressip_udn_l3_test.go +++ b/go-controller/pkg/ovn/egressip_udn_l3_test.go @@ -17,10 +17,12 @@ import ( k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressipv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressip/v1" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/udnenabledsvc" @@ -55,6 +57,8 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol eIP1Mark = 50000 eIP2Mark = 50001 secondaryNetworkID = "2" + //tnlKey = zoneinterconnect.BaseTransitSwitchTunnelKey + secondaryNetworkID + tnlKey = "16711685" ) getEgressIPStatusLen := func(egressIPName string) func() int { @@ -98,6 +102,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol config.OVNKubernetesFeature.EnableMultiNetwork = true config.Gateway.Mode = config.GatewayModeShared config.OVNKubernetesFeature.EgressIPNodeHealthCheckPort = 1234 + config.Gateway.V4MasqueradeSubnet = dummyMasqueradeSubnet().String() app = cli.NewApp() app.Name = "test" @@ -159,6 +164,7 @@ var _ = 
ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node1Tsp), "k8s.ovn.org/zone-name": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -170,6 +176,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf("{\"ipv4\":\"%s/16\"}", v4Node2Tsp), "k8s.ovn.org/zone-name": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) eIP := egressipv1.EgressIP{ @@ -295,7 +302,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fakeOvn.controller.eIPC.nodeZoneState.Store(node2Name, false) fakeOvn.controller.eIPC.zone = node1.Name fakeOvn.controller.zone = node1.Name - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -535,6 +542,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, 
nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -547,6 +555,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -668,7 +677,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() // simulate Start() of secondary network controller - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo()) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(secConInfo.bnc.GetNetInfo(), &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(secConInfo.bnc.GetNetInfo(), node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1054,6 +1063,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -1066,6 +1076,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: 
fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -1195,9 +1206,11 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") nUDN.IP = iUDN + secConInfo.bnc.zone = node1.Name secConInfo.bnc.logicalPortCache.add(&egressPodUDNLocal, "", util.GetNADName(nad.Namespace, nad.Name), "", nil, []*net.IPNet{nUDN}) _, err = fakeOvn.fakeClient.EgressIPClient.K8sV1().EgressIPs().Create(context.TODO(), &eIP, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(secConInfo.bnc.WatchNodes()).To(gomega.Succeed()) egressSVCServedPodsASv4, _ := buildEgressServiceAddressSets(nil) egressIPServedPodsASCDNv4, _ := buildEgressIPServedPodsAddressSets([]string{podV4IP}, ovntypes.DefaultNetworkName, DefaultNetworkControllerName) egressNodeIPsASv4, _ := buildEgressIPNodeAddressSets([]string{node1IPv4, node2IPv4}) @@ -1325,6 +1338,19 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, Networks: []string{nodeLogicalRouterIfAddrV4}, }, + &nbdb.NAT{ + UUID: networkName1_ + node1Name + "-masqueradeNAT-UUID", + ExternalIDs: map[string]string{ + "k8s.ovn.org/topology": "layer3", + "k8s.ovn.org/network": networkName1, + }, + ExternalIP: "169.254.169.14", + LogicalIP: node1UDNSubnet.String(), + LogicalPort: ptr.To("rtos-" + networkName1_ + node1Name), + Match: "eth.dst == 0a:58:14:80:00:02", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{"stateless": "false"}, + }, &nbdb.LogicalRouter{ Name: netInfo.GetNetworkScopedClusterRouterName(), UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", @@ -1333,6 +1359,7 @@ var _ = 
ginkgo.Describe("EgressIP Operations for user defined network with topol fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + Nat: []string{networkName1_ + node1Name + "-masqueradeNAT-UUID"}, }, &nbdb.LogicalRouter{ UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", @@ -1345,14 +1372,57 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol &nbdb.LogicalSwitchPort{ UUID: "k8s-" + networkName1_ + node1Name + "-UUID", Name: "k8s-" + networkName1_ + node1Name, - Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + Addresses: []string{"0a:58:14:80:00:02 " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "stor-" + networkName1_ + node1Name + "-UUID", + Name: "stor-" + networkName1_ + node1Name, + Addresses: []string{"router"}, + Options: map[string]string{libovsdbops.RouterPort: "rtos-" + networkName1_ + node1Name}, + Type: "router", + }, + &nbdb.ACL{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID", + Direction: nbdb.ACLDirectionToLport, + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": networkName1_ + node1Name, + "ip": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "k8s.ovn.org/id": fmt.Sprintf("%s-network-controller:NetpolNode:%s:%s", networkName1, networkName1_+node1Name, util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + "k8s.ovn.org/owner-controller": networkName1 + "-network-controller", + "k8s.ovn.org/owner-type": "NetpolNode", + }, + Match: fmt.Sprintf("ip4.src==%s", util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + Meter: ptr.To(ovntypes.OvnACLLoggingMeter), + Priority: ovntypes.PrimaryUDNAllowPriority, + Tier: 
ovntypes.DefaultACLTier, }, &nbdb.LogicalSwitch{ UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", Name: netInfo.GetNetworkScopedSwitchName(node1.Name), - Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID", "stor-" + networkName1_ + node1Name + "-UUID"}, ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + OtherConfig: map[string]string{ + "exclude_ips": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "subnet": node1UDNSubnet.String(), + }, + ACLs: []string{netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "TRANSIT-UUID", + Name: networkName1_ + ovntypes.TransitSwitch, + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: ovntypes.Layer3Topology, + ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary}, + OtherConfig: map[string]string{ + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + libovsdbops.RequestedTnlKey: tnlKey, + }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), @@ -1457,6 +1527,19 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, Networks: []string{nodeLogicalRouterIfAddrV4}, }, + &nbdb.NAT{ + UUID: networkName1_ + node1Name + "-masqueradeNAT-UUID", + ExternalIDs: map[string]string{ + "k8s.ovn.org/topology": "layer3", + "k8s.ovn.org/network": 
networkName1, + }, + ExternalIP: "169.254.169.14", + LogicalIP: node1UDNSubnet.String(), + LogicalPort: ptr.To("rtos-" + networkName1_ + node1Name), + Match: "eth.dst == 0a:58:14:80:00:02", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{"stateless": "false"}, + }, &nbdb.LogicalRouter{ Name: netInfo.GetNetworkScopedClusterRouterName(), UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", @@ -1465,6 +1548,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), }, StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + Nat: []string{networkName1_ + node1Name + "-masqueradeNAT-UUID"}, }, &nbdb.LogicalRouter{ UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", @@ -1475,14 +1559,57 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol &nbdb.LogicalSwitchPort{ UUID: "k8s-" + networkName1_ + node1Name + "-UUID", Name: "k8s-" + networkName1_ + node1Name, - Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + Addresses: []string{"0a:58:14:80:00:02 " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "stor-" + networkName1_ + node1Name + "-UUID", + Name: "stor-" + networkName1_ + node1Name, + Addresses: []string{"router"}, + Options: map[string]string{libovsdbops.RouterPort: "rtos-" + networkName1_ + node1Name}, + Type: "router", + }, + &nbdb.ACL{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID", + Direction: nbdb.ACLDirectionToLport, + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": networkName1_ + node1Name, + "ip": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "k8s.ovn.org/id": fmt.Sprintf("%s-network-controller:NetpolNode:%s:%s", networkName1, networkName1_+node1Name, 
util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + "k8s.ovn.org/owner-controller": networkName1 + "-network-controller", + "k8s.ovn.org/owner-type": "NetpolNode", + }, + Match: fmt.Sprintf("ip4.src==%s", util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + Meter: ptr.To(ovntypes.OvnACLLoggingMeter), + Priority: ovntypes.PrimaryUDNAllowPriority, + Tier: ovntypes.DefaultACLTier, }, &nbdb.LogicalSwitch{ UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", Name: netInfo.GetNetworkScopedSwitchName(node1.Name), - Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID", "stor-" + networkName1_ + node1Name + "-UUID"}, ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + OtherConfig: map[string]string{ + "exclude_ips": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "subnet": node1UDNSubnet.String(), + }, + ACLs: []string{netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "TRANSIT-UUID", + Name: networkName1_ + ovntypes.TransitSwitch, + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: ovntypes.Layer3Topology, + ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary}, + OtherConfig: map[string]string{ + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + libovsdbops.RequestedTnlKey: tnlKey, + }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), @@ -1547,6 +1674,7 @@ var _ = ginkgo.Describe("EgressIP 
Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -1559,6 +1687,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -1680,7 +1809,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1915,6 +2044,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -1927,6 +2057,7 @@ var _ = 
ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := []egressipv1.EgressIPStatusItem{ @@ -2052,7 +2183,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol err = fakeOvn.networkManager.Start() gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer fakeOvn.networkManager.Stop() - err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo) + err = fakeOvn.eIPController.ensureRouterPoliciesForNetwork(netInfo, &node1) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = fakeOvn.eIPController.ensureSwitchPoliciesForNode(netInfo, node1Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -2274,6 +2405,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node1Name, "k8s.ovn.org/remote-zone-migrated": node1Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node1IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, nodeLogicalRouterIPv4[0], networkName1, nodeLogicalRouterIPv4[0]), } labels := map[string]string{ "k8s.ovn.org/egress-assignable": "", @@ -2286,6 +2418,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol "k8s.ovn.org/zone-name": node2Name, "k8s.ovn.org/remote-zone-migrated": node2Name, util.OVNNodeHostCIDRs: fmt.Sprintf("[\"%s\"]", node2IPv4CIDR), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s/16"}, "%s":{"ipv4":"%s/16"}}`, node2LogicalRouterIPv4[0], networkName1, node2LogicalRouterIPv4[0]), } node2 := getNodeObj(node2Name, node2Annotations, labels) twoNodeStatus := 
[]egressipv1.EgressIPStatusItem{ @@ -2415,6 +2548,8 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol gomega.Expect(err).NotTo(gomega.HaveOccurred()) secConInfo, ok := fakeOvn.secondaryControllers[networkName1] gomega.Expect(ok).To(gomega.BeTrue()) + secConInfo.bnc.zone = node1.Name + gomega.Expect(secConInfo.bnc.WatchNodes()).To(gomega.Succeed()) // Add pod IPs to UDN cache iUDN, nUDN, _ := net.ParseCIDR(v4Pod1IPNode1Net1 + "/23") nUDN.IP = iUDN @@ -2553,6 +2688,19 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + networkName1_ + node1.Name, Networks: []string{nodeLogicalRouterIfAddrV4}, }, + &nbdb.NAT{ + UUID: networkName1_ + node1Name + "-masqueradeNAT-UUID", + ExternalIDs: map[string]string{ + "k8s.ovn.org/topology": "layer3", + "k8s.ovn.org/network": networkName1, + }, + ExternalIP: "169.254.169.14", + LogicalIP: node1UDNSubnet.String(), + LogicalPort: ptr.To("rtos-" + networkName1_ + node1Name), + Match: "eth.dst == 0a:58:14:80:00:02", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{"stateless": "false"}, + }, &nbdb.LogicalRouter{ Name: netInfo.GetNetworkScopedClusterRouterName(), UUID: netInfo.GetNetworkScopedClusterRouterName() + "-UUID", @@ -2561,6 +2709,7 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol fmt.Sprintf("%s-no-reroute-reply-traffic", netInfo.GetNetworkName()), getReRoutePolicyUUID(eipNamespace2, podName2, IPFamilyValueV4, netInfo.GetNetworkName())}, StaticRoutes: []string{fmt.Sprintf("%s-reroute-static-route-UUID", netInfo.GetNetworkName())}, + Nat: []string{networkName1_ + node1Name + "-masqueradeNAT-UUID"}, }, &nbdb.LogicalRouter{ UUID: netInfo.GetNetworkScopedGWRouterName(node1.Name) + "-UUID", @@ -2573,14 +2722,58 @@ var _ = ginkgo.Describe("EgressIP Operations for user defined network with topol &nbdb.LogicalSwitchPort{ UUID: "k8s-" + networkName1_ + node1Name + 
"-UUID", Name: "k8s-" + networkName1_ + node1Name, - Addresses: []string{"fe:1a:b2:3f:0e:fb " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + Addresses: []string{"0a:58:14:80:00:02 " + util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()}, + }, + &nbdb.LogicalSwitchPort{ + UUID: "stor-" + networkName1_ + node1Name + "-UUID", + Name: "stor-" + networkName1_ + node1Name, + Addresses: []string{"router"}, + Options: map[string]string{libovsdbops.RouterPort: "rtos-" + networkName1_ + node1Name}, + Type: "router", + }, + &nbdb.ACL{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID", + Direction: nbdb.ACLDirectionToLport, + Action: nbdb.ACLActionAllowRelated, + ExternalIDs: map[string]string{ + "k8s.ovn.org/name": networkName1_ + node1Name, + "ip": util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "k8s.ovn.org/id": fmt.Sprintf("%s-network-controller:NetpolNode:%s:%s", networkName1, networkName1_+node1Name, util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + "k8s.ovn.org/owner-controller": networkName1 + "-network-controller", + "k8s.ovn.org/owner-type": "NetpolNode", + }, + Match: fmt.Sprintf("ip4.src==%s", util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String()), + Meter: ptr.To(ovntypes.OvnACLLoggingMeter), + Priority: ovntypes.PrimaryUDNAllowPriority, + Tier: ovntypes.DefaultACLTier, }, &nbdb.LogicalSwitch{ UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "-UUID", Name: netInfo.GetNetworkScopedSwitchName(node1.Name), - Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID"}, + Ports: []string{"k8s-" + networkName1_ + node1Name + "-UUID", "stor-" + networkName1_ + node1Name + "-UUID"}, ExternalIDs: map[string]string{ovntypes.NetworkExternalID: netInfo.GetNetworkName(), ovntypes.TopologyExternalID: ovntypes.Layer3Topology}, QOSRules: []string{fmt.Sprintf("%s-QoS-UUID", netInfo.GetNetworkName())}, + OtherConfig: map[string]string{ + "exclude_ips": 
util.GetNodeManagementIfAddr(node1UDNSubnet).IP.String(), + "subnet": node1UDNSubnet.String(), + }, + ACLs: []string{netInfo.GetNetworkScopedSwitchName(node1.Name) + "-NetpolNode-UUID"}, + }, + &nbdb.LogicalSwitch{ + UUID: netInfo.GetNetworkScopedSwitchName(node1.Name) + "TRANSIT-UUID", + Name: networkName1_ + ovntypes.TransitSwitch, + ExternalIDs: map[string]string{ + ovntypes.NetworkExternalID: netInfo.GetNetworkName(), + ovntypes.TopologyExternalID: ovntypes.Layer3Topology, + ovntypes.NetworkRoleExternalID: ovntypes.NetworkRolePrimary, + }, + OtherConfig: map[string]string{ + "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": networkName1_ + ovntypes.TransitSwitch, + libovsdbops.RequestedTnlKey: tnlKey, + }, }, getNoReRouteReplyTrafficPolicyForController(netInfo.GetNetworkName(), DefaultNetworkControllerName), getDefaultQoSRule(false, netInfo.GetNetworkName(), DefaultNetworkControllerName), diff --git a/go-controller/pkg/ovn/egressqos.go b/go-controller/pkg/ovn/egressqos.go index fc6258408e..605b127d03 100644 --- a/go-controller/pkg/ovn/egressqos.go +++ b/go-controller/pkg/ovn/egressqos.go @@ -24,7 +24,7 @@ import ( utilnet "k8s.io/utils/net" "k8s.io/utils/ptr" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressqosapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1" diff --git a/go-controller/pkg/ovn/egressservices_test.go b/go-controller/pkg/ovn/egressservices_test.go index cf2b6cb3af..5b47022a9d 100644 --- a/go-controller/pkg/ovn/egressservices_test.go +++ b/go-controller/pkg/ovn/egressservices_test.go @@ -956,6 +956,7 @@ var _ = ginkgo.Describe("OVN Egress Service Operations", func() { config.IPv6Mode = true config.OVNKubernetesFeature.EnableInterconnect = interconnectEnabled node1 := nodeFor(node1Name, node1IPv4, node1IPv6, node1IPv4Subnet, node1IPv6Subnet, node1transitIPv4, 
node1transitIPv6) + node1.Annotations[util.OVNNodeGRLRPAddrs] = `{"default":{"ipv4":"100.64.0.2/16", "ipv6":"fef0::56/16"}}` node2 := nodeFor(node2Name, node2IPv4, node2IPv6, node2IPv4Subnet, node2IPv6Subnet, node2transitIPv4, node2transitIPv6) clusterRouter := &nbdb.LogicalRouter{ diff --git a/go-controller/pkg/ovn/external_gateway_apb_test.go b/go-controller/pkg/ovn/external_gateway_apb_test.go index 605066d7f6..b237174ae0 100644 --- a/go-controller/pkg/ovn/external_gateway_apb_test.go +++ b/go-controller/pkg/ovn/external_gateway_apb_test.go @@ -22,6 +22,7 @@ import ( adminpolicybasedrouteapi "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1" adminpolicybasedrouteclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/clientset/versioned" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/controller/apbroute" @@ -176,8 +177,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -212,8 +213,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -322,8 +323,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: 
map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -358,8 +359,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -461,8 +462,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -497,8 +498,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -604,8 +605,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -650,8 +651,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -812,8 +813,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() 
{ }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -894,8 +895,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1037,8 +1038,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:49:a1:93:cb fd00:10:244:2::3"}, }, @@ -1165,8 +1166,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1236,8 +1237,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1337,8 +1338,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1373,8 +1374,8 @@ var _ = 
ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1479,8 +1480,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1515,8 +1516,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1641,8 +1642,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1677,8 +1678,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1795,8 +1796,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: 
[]string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1831,8 +1832,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1858,8 +1859,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1900,8 +1901,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -1990,8 +1991,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2110,8 +2111,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2241,8 +2242,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + 
libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2283,8 +2284,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2339,8 +2340,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "iface-id-ver": "myPod", - "requested-chassis": "node1", + "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, @@ -2538,8 +2539,8 @@ var _ = ginkgo.Describe("OVN for APB External Route Operations", func() { }, Name: "namespace1_myPod", Options: map[string]string{ - "requested-chassis": "node1", - "iface-id-ver": "myPod", + libovsdbops.RequestedChassis: "node1", + "iface-id-ver": "myPod", }, PortSecurity: []string{"0a:58:0a:80:01:03 10.128.1.3"}, }, diff --git a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go index 8933e78521..01808d3927 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/logical_router_policy/logical_router_policy_sync.go @@ -9,8 +9,8 @@ import ( "k8s.io/klog/v2" utilsnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git 
a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go index cf9d433cc8..ba81787817 100644 --- a/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go +++ b/go-controller/pkg/ovn/external_ids_syncer/nat/nat_sync.go @@ -7,8 +7,8 @@ import ( "k8s.io/klog/v2" utilsnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/gateway.go b/go-controller/pkg/ovn/gateway.go index 9ecbb512fd..d652dbcd8a 100644 --- a/go-controller/pkg/ovn/gateway.go +++ b/go-controller/pkg/ovn/gateway.go @@ -11,13 +11,11 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" @@ -27,6 +25,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/node" + addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/gateway" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/gatewayrouter" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -44,7 +43,6 @@ type GatewayManager struct { nbClient libovsdbclient.Client netInfo util.NetInfo watchFactory 
*factory.WatchFactory - // Cluster wide Load_Balancer_Group UUID. // Includes all node switches and node gateway routers. clusterLoadBalancerGroupUUID string @@ -145,8 +143,8 @@ func WithLoadBalancerGroups(routerLBGroup, clusterLBGroup, switchLBGroup string) } // cleanupStalePodSNATs removes pod SNATs against nodeIP for the given node if -// the SNAT.logicalIP isn't an active podIP, the pod network is being advertised -// on this node or disableSNATMultipleGWs=false. We don't have to worry about +// the SNAT.logicalIP isn't an active podIP, or disableSNATMultipleGWs=false. +// We don't have to worry about // missing SNATs that should be added because addLogicalPort takes care of this // for all pods when RequestRetryObjs is called for each node add. // Other non-pod SNATs like join subnet SNATs are ignored. @@ -156,14 +154,12 @@ func WithLoadBalancerGroups(routerLBGroup, clusterLBGroup, switchLBGroup string) // pod->nodeSNATs which won't get cleared up unless explicitly deleted. // NOTE2: egressIP SNATs are synced in EIP controller. 
func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.IPNet, gwLRPIPs []net.IP) error { - // collect all the pod IPs for which we should be doing the SNAT; if the pod - // network is advertised or DisableSNATMultipleGWs==false we consider all + // collect all the pod IPs for which we should be doing the SNAT; + // if DisableSNATMultipleGWs==false we consider all // the SNATs stale podIPsWithSNAT := sets.New[string]() - if !gw.isRoutingAdvertised(nodeName) && config.Gateway.DisableSNATMultipleGWs { - pods, err := gw.kube.GetPods(metav1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(), - }) + if config.Gateway.DisableSNATMultipleGWs { + pods, err := gw.watchFactory.GetAllPods() if err != nil { return fmt.Errorf("unable to list existing pods on node: %s, %w", nodeName, err) @@ -173,6 +169,9 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I if !util.PodScheduled(&pod) { //if the pod is not scheduled we should not remove the nat continue } + if pod.Spec.NodeName != nodeName { + continue + } if util.PodCompleted(&pod) { collidingPod, err := findPodWithIPAddresses(gw.watchFactory, gw.netInfo, []net.IP{utilnet.ParseIPSloppy(pod.Status.PodIP)}, "") //even if a pod is completed we should still delete the nat if the ip is not in use anymore if err != nil { @@ -232,7 +231,6 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I } natsToDelete = append(natsToDelete, routerNat) } - if len(natsToDelete) > 0 { err := libovsdbops.DeleteNATs(gw.nbClient, gatewayRouter, natsToDelete...) if err != nil { @@ -243,39 +241,8 @@ func (gw *GatewayManager) cleanupStalePodSNATs(nodeName string, nodeIPs []*net.I return nil } -// GatewayInit creates a gateway router for the local chassis. -// enableGatewayMTU enables options:gateway_mtu for gateway routers. 
-func (gw *GatewayManager) GatewayInit( - nodeName string, - clusterIPSubnet []*net.IPNet, - hostSubnets []*net.IPNet, - l3GatewayConfig *util.L3GatewayConfig, - gwLRPJoinIPs, drLRPIfAddrs []*net.IPNet, - externalIPs []net.IP, - enableGatewayMTU bool, -) error { - - gwLRPIPs := make([]net.IP, 0) - for _, gwLRPJoinIP := range gwLRPJoinIPs { - gwLRPIPs = append(gwLRPIPs, gwLRPJoinIP.IP) - } - if gw.netInfo.TopologyType() == types.Layer2Topology { - // At layer2 GR LRP acts as the layer3 ovn_cluster_router so we need - // to configure here the .1 address, this will work only for IC with - // one node per zone, since ARPs for .1 will not go beyond local switch. - // This is being done to add the ICMP SNATs for .1 podSubnet that OVN GR generates - for _, subnet := range hostSubnets { - gwLRPIPs = append(gwLRPIPs, util.GetNodeGatewayIfAddr(subnet).IP) - } - } - +func (gw *GatewayManager) createGWRouter(l3GatewayConfig *util.L3GatewayConfig, gwLRPJoinIPs []*net.IPNet) (*nbdb.LogicalRouter, error) { // Create a gateway router. 
- gatewayRouter := gw.gwRouterName - physicalIPs := make([]string, len(l3GatewayConfig.IPAddresses)) - for i, ip := range l3GatewayConfig.IPAddresses { - physicalIPs[i] = ip.IP.String() - } - dynamicNeighRouters := "true" if config.OVNKubernetesFeature.EnableInterconnect { dynamicNeighRouters = "false" @@ -306,6 +273,10 @@ func (gw *GatewayManager) GatewayInit( } logicalRouterOptions["lb_force_snat_ip"] = strings.Join(joinIPDualStack, " ") } + physicalIPs := make([]string, len(l3GatewayConfig.IPAddresses)) + for i, ip := range l3GatewayConfig.IPAddresses { + physicalIPs[i] = ip.IP.String() + } logicalRouterExternalIDs := map[string]string{ "physical_ip": physicalIPs[0], "physical_ips": strings.Join(physicalIPs, ","), @@ -315,68 +286,62 @@ func (gw *GatewayManager) GatewayInit( maps.Copy(logicalRouterExternalIDs, util.GenerateExternalIDsForSwitchOrRouter(gw.netInfo)) } - logicalRouter := nbdb.LogicalRouter{ - Name: gatewayRouter, + gwRouter := nbdb.LogicalRouter{ + Name: gw.gwRouterName, Options: logicalRouterOptions, ExternalIDs: logicalRouterExternalIDs, Copp: &gw.coppUUID, } if gw.clusterLoadBalancerGroupUUID != "" { - logicalRouter.LoadBalancerGroup = []string{gw.clusterLoadBalancerGroupUUID} + gwRouter.LoadBalancerGroup = []string{gw.clusterLoadBalancerGroupUUID} if l3GatewayConfig.NodePortEnable && gw.routerLoadBalancerGroupUUID != "" { // add routerLoadBalancerGroupUUID to the gateway router only if nodePort is enabled - logicalRouter.LoadBalancerGroup = append(logicalRouter.LoadBalancerGroup, gw.routerLoadBalancerGroupUUID) + gwRouter.LoadBalancerGroup = append(gwRouter.LoadBalancerGroup, gw.routerLoadBalancerGroupUUID) } } - // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, - // so let's save the old value before we update the router for later use - var oldExtIPs []net.IP - oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &logicalRouter) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { - 
return fmt.Errorf("failed in retrieving %s, error: %v", gatewayRouter, err) - } - - if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { - if physicalIPs, ok := oldLogicalRouter.ExternalIDs["physical_ips"]; ok { - oldExternalIPs := strings.Split(physicalIPs, ",") - oldExtIPs = make([]net.IP, len(oldExternalIPs)) - for i, oldExternalIP := range oldExternalIPs { - cidr := oldExternalIP + util.GetIPFullMaskString(oldExternalIP) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return fmt.Errorf("invalid cidr:%s error: %v", cidr, err) - } - oldExtIPs[i] = ip - } - } + err := libovsdbops.CreateOrUpdateLogicalRouter(gw.nbClient, &gwRouter, &gwRouter.Options, + &gwRouter.ExternalIDs, &gwRouter.LoadBalancerGroup, &gwRouter.Copp) + if err != nil { + return nil, fmt.Errorf("failed to create logical router %+v: %v", gwRouter, err) } + return &gwRouter, nil +} - err = libovsdbops.CreateOrUpdateLogicalRouter(gw.nbClient, &logicalRouter, &logicalRouter.Options, - &logicalRouter.ExternalIDs, &logicalRouter.LoadBalancerGroup, &logicalRouter.Copp) - if err != nil { - return fmt.Errorf("failed to create logical router %+v: %v", logicalRouter, err) +func (gw *GatewayManager) getGWRouterPeerPortName() string { + // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. + // Ensure that the ports are named appropriately, this is important for the logical router policies + // created for local node access. 
+ // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 + if gw.netInfo.TopologyType() == types.Layer2Topology { + return types.SwitchToRouterPrefix + gw.joinSwitchName } - gwSwitchPort := types.JoinSwitchToGWRouterPrefix + gatewayRouter - gwRouterPort := types.GWRouterToJoinSwitchPrefix + gatewayRouter + return types.JoinSwitchToGWRouterPrefix + gw.gwRouterName +} +func (gw *GatewayManager) getGWRouterPortName() string { // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. // Ensure that the ports are named appropriately, this is important for the logical router policies // created for local node access. // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 if gw.netInfo.TopologyType() == types.Layer2Topology { - gwSwitchPort = types.SwitchToRouterPrefix + gw.joinSwitchName - gwRouterPort = types.RouterToSwitchPrefix + gw.joinSwitchName + return types.RouterToSwitchPrefix + gw.joinSwitchName } + return types.GWRouterToJoinSwitchPrefix + gw.gwRouterName +} + +func (gw *GatewayManager) createGWRouterPeerPort(nodeName string) error { + gwSwitchPort := gw.getGWRouterPeerPortName() + gwRouterPortName := gw.getGWRouterPortName() logicalSwitchPort := nbdb.LogicalSwitchPort{ Name: gwSwitchPort, Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": gwRouterPort, + libovsdbops.RouterPort: gwRouterPortName, }, } if gw.netInfo.IsSecondary() { @@ -384,64 +349,73 @@ func (gw *GatewayManager) GatewayInit( types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } - if gw.netInfo.TopologyType() == types.Layer2Topology { - node, err := gw.watchFactory.GetNode(nodeName) - if err != nil { - return fmt.Errorf("failed to fetch node %s from watch factory %w", node, err) - } - tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, 
gw.netInfo.GetNetworkName()) - if err != nil { - if util.IsAnnotationNotSetError(err) { - // remote node may not have the annotation yet, suppress it - return types.NewSuppressedError(err) - } - // Don't consider this node as cluster-manager has not allocated node id yet. - return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", - nodeName, gw.netInfo.GetNetworkName(), err) + } + if gw.netInfo.TopologyType() == types.Layer2Topology { + node, err := gw.watchFactory.GetNode(nodeName) + if err != nil { + return fmt.Errorf("failed to fetch node %s from watch factory %w", node.Name, err) + } + tunnelID, err := util.ParseUDNLayer2NodeGRLRPTunnelIDs(node, gw.netInfo.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) } - logicalSwitchPort.Options["requested-tnl-key"] = strconv.Itoa(tunnelID) + // Don't consider this node as cluster-manager has not allocated node id yet. 
+ return fmt.Errorf("failed to fetch tunnelID annotation from the node %s for network %s, err: %w", + nodeName, gw.netInfo.GetNetworkName(), err) } + logicalSwitchPort.Options[libovsdbops.RequestedTnlKey] = strconv.Itoa(tunnelID) } sw := nbdb.LogicalSwitch{Name: gw.joinSwitchName} - err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) + err := libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(gw.nbClient, &sw, &logicalSwitchPort) if err != nil { return fmt.Errorf("failed to create port %v on logical switch %q: %v", gwSwitchPort, sw.Name, err) } + return err +} - gwLRPMAC := util.IPAddrToHWAddr(gwLRPIPs[0]) +func (gw *GatewayManager) createGWRouterPort(hostSubnets []*net.IPNet, gwLRPJoinIPs []*net.IPNet, + enableGatewayMTU bool, gwRouter *nbdb.LogicalRouter) ([]net.IP, error) { + gwLRPIPs := make([]net.IP, 0) gwLRPNetworks := []string{} for _, gwLRPJoinIP := range gwLRPJoinIPs { + gwLRPIPs = append(gwLRPIPs, gwLRPJoinIP.IP) gwLRPNetworks = append(gwLRPNetworks, gwLRPJoinIP.String()) } if gw.netInfo.TopologyType() == types.Layer2Topology { // At layer2 GR LRP acts as the layer3 ovn_cluster_router so we need // to configure here the .1 address, this will work only for IC with // one node per zone, since ARPs for .1 will not go beyond local switch. 
+ // This is being done to add the ICMP SNATs for .1 podSubnet that OVN GR generates for _, subnet := range hostSubnets { + gwLRPIPs = append(gwLRPIPs, util.GetNodeGatewayIfAddr(subnet).IP) gwLRPNetworks = append(gwLRPNetworks, util.GetNodeGatewayIfAddr(subnet).String()) } } + gwLRPMAC := util.IPAddrToHWAddr(gwLRPIPs[0]) var options map[string]string if enableGatewayMTU { options = map[string]string{ - "gateway_mtu": strconv.Itoa(config.Default.MTU), + libovsdbops.GatewayMTU: strconv.Itoa(config.Default.MTU), } } - logicalRouterPort := nbdb.LogicalRouterPort{ - Name: gwRouterPort, + + gwRouterPort := nbdb.LogicalRouterPort{ + Name: gw.getGWRouterPortName(), MAC: gwLRPMAC.String(), Networks: gwLRPNetworks, Options: options, } if gw.netInfo.IsSecondary() { - logicalRouterPort.ExternalIDs = map[string]string{ + gwRouterPort.ExternalIDs = map[string]string{ types.NetworkExternalID: gw.netInfo.GetNetworkName(), types.TopologyExternalID: gw.netInfo.TopologyType(), } _, isNetIPv6 := gw.netInfo.IPMode() if gw.netInfo.TopologyType() == types.Layer2Topology && isNetIPv6 && config.IPv6Mode { - logicalRouterPort.Ipv6RaConfigs = map[string]string{ + gwRouterPort.Ipv6RaConfigs = map[string]string{ "address_mode": "dhcpv6_stateful", "send_periodic": "true", "max_interval": "900", // 15 minutes @@ -449,24 +423,29 @@ func (gw *GatewayManager) GatewayInit( "router_preference": "LOW", // The static gateway configured by CNI is MEDIUM, so make this SLOW so it has less effect for pods } if gw.netInfo.MTU() > 0 { - logicalRouterPort.Ipv6RaConfigs["mtu"] = fmt.Sprintf("%d", gw.netInfo.MTU()) + gwRouterPort.Ipv6RaConfigs["mtu"] = fmt.Sprintf("%d", gw.netInfo.MTU()) } } } - err = libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, &logicalRouter, - &logicalRouterPort, nil, &logicalRouterPort.MAC, &logicalRouterPort.Networks, - &logicalRouterPort.Options) + err := libovsdbops.CreateOrUpdateLogicalRouterPort(gw.nbClient, gwRouter, + &gwRouterPort, nil, &gwRouterPort.MAC, 
&gwRouterPort.Networks, + &gwRouterPort.Options) if err != nil { - return fmt.Errorf("failed to create port %+v on router %+v: %v", logicalRouterPort, logicalRouter, err) + return nil, fmt.Errorf("failed to create port %+v on router %+v: %v", gwRouterPort, gwRouter, err) } + return gwLRPIPs, nil +} + +func (gw *GatewayManager) updateGWRouterStaticRoutes(clusterIPSubnet, drLRPIfAddrs []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, externalRouterPort string, gwRouter *nbdb.LogicalRouter) error { if len(drLRPIfAddrs) > 0 { for _, entry := range clusterIPSubnet { drLRPIfAddr, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(entry), drLRPIfAddrs) if err != nil { return fmt.Errorf("failed to add a static route in GR %s with distributed "+ "router as the nexthop: %v", - gatewayRouter, err) + gw.gwRouterName, err) } // TODO There has to be a better way to do this. It seems like the @@ -477,9 +456,9 @@ func (gw *GatewayManager) GatewayInit( // a better way to do it. Adding support for indirection in ModelClients // opModel (being able to operate on thins pointed to from another model) // would be a great way to simplify this. 
- updatedLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, &logicalRouter) + updatedGWRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, gwRouter) if err != nil { - return fmt.Errorf("unable to retrieve logical router %+v: %v", logicalRouter, err) + return fmt.Errorf("unable to retrieve logical router %+v: %v", gwRouter, err) } lrsr := nbdb.LogicalRouterStaticRoute{ @@ -494,51 +473,16 @@ func (gw *GatewayManager) GatewayInit( } p := func(item *nbdb.LogicalRouterStaticRoute) bool { return item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(item.Policy, lrsr.Policy) && - util.SliceHasStringItem(updatedLogicalRouter.StaticRoutes, item.UUID) + util.SliceHasStringItem(updatedGWRouter.StaticRoutes, item.UUID) } - err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gatewayRouter, &lrsr, p, + err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return fmt.Errorf("failed to add a static route %+v in GR %s with distributed router as the nexthop, err: %v", lrsr, gatewayRouter, err) + return fmt.Errorf("failed to add a static route %+v in GR %s with distributed router as the nexthop, err: %v", lrsr, gw.gwRouterName, err) } } } - if err := gw.addExternalSwitch("", - l3GatewayConfig.InterfaceID, - gatewayRouter, - l3GatewayConfig.MACAddress.String(), - physNetName(gw.netInfo), - l3GatewayConfig.IPAddresses, - l3GatewayConfig.VLANID); err != nil { - return err - } - - if l3GatewayConfig.EgressGWInterfaceID != "" { - if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, - l3GatewayConfig.EgressGWInterfaceID, - gatewayRouter, - l3GatewayConfig.EgressGWMACAddress.String(), - types.PhysicalNetworkExGwName, - l3GatewayConfig.EgressGWIPAddresses, - nil); err != nil { - return err - } - } - - externalRouterPort := types.GWRouterToExtSwitchPrefix + gatewayRouter - - nextHops := l3GatewayConfig.NextHops - - // Remove stale OVN 
resources with any old masquerade IP - if err := deleteStaleMasqueradeResources(gw.nbClient, gatewayRouter, nodeName, gw.watchFactory); err != nil { - return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) - } - - if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gatewayRouter, gw.netInfo); err != nil { - return err - } - for _, nextHop := range node.DummyNextHopIPs() { // Add return service route for OVN back to host prefix := config.Gateway.V4MasqueradeSubnet @@ -561,12 +505,14 @@ func (gw *GatewayManager) GatewayInit( return item.OutputPort != nil && *item.OutputPort == *lrsr.OutputPort && item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(item.Policy, lrsr.Policy) } - err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gatewayRouter, &lrsr, p, + err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return fmt.Errorf("error creating service static route %+v in GR %s: %v", lrsr, gatewayRouter, err) + return fmt.Errorf("error creating service static route %+v in GR %s: %v", lrsr, gw.gwRouterName, err) } } + + nextHops := l3GatewayConfig.NextHops // Add default gateway routes in GR for _, nextHop := range nextHops { var allIPs string @@ -591,13 +537,17 @@ func (gw *GatewayManager) GatewayInit( return item.OutputPort != nil && *item.OutputPort == *lrsr.OutputPort && item.IPPrefix == lrsr.IPPrefix && libovsdbops.PolicyEqualPredicate(lrsr.Policy, item.Policy) } - err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gatewayRouter, &lrsr, + err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(gw.nbClient, gw.gwRouterName, &lrsr, p, &lrsr.Nexthop) if err != nil { - return fmt.Errorf("error creating static route %+v in GR %s: %v", lrsr, gatewayRouter, err) + return fmt.Errorf("error creating static route %+v in GR %s: %v", lrsr, 
gw.gwRouterName, err) } } + return nil +} + +func (gw *GatewayManager) updateClusterRouterStaticRoutes(hostSubnets []*net.IPNet, gwLRPIPs []net.IP) error { // We need to add a route to the Gateway router's IP, on the // cluster router, to ensure that the return traffic goes back // to the same gateway router @@ -692,17 +642,36 @@ func (gw *GatewayManager) GatewayInit( } } } + return nil +} +// syncNATsForGRIPChange updates the SNAT rules on the gateway router that are created outside the GatewayManager. +// Multiple handlers, like +// - DefaultNetworkController.addLogicalPort +// - DefaultNetworkController.updateNamespace +// - EgressIPController.addExternalGWPodSNATOps +// - EgressIPController.addPodEgressIPAssignment +// - SecondaryLayer2NetworkController.buildUDNEgressSNAT +// - SecondaryLayer3NetworkController.addUDNNodeSubnetEgressSNAT +// use gateway config parameters to create SNAT rules on the gateway router, but some of them (not all) don't watch +// gateway config changes and rely on the GatewayManager to update their SNAT rules. +// Is it racy? Yes! +// This function also updates SNAT created by `updateGWRouterNAT`, because NATs don't use ExternalIDs, +// and their fields are used to find equivalent NATs. That means on gateway IPs change, instead of updating +// the old NAT, we would create a new one. 
FIXME: add externalIDs to NATs +func (gw *GatewayManager) syncNATsForGRIPChange(externalIPs, oldExtIPs, gwLRPIPs []net.IP, + gwRouter, oldGWRouter *nbdb.LogicalRouter) error { // if config.Gateway.DisabledSNATMultipleGWs is not set (by default it is not), // the NAT rules for pods not having annotations to route through either external // gws or pod CNFs will be added within pods.go addLogicalPort var natsToUpdate []*nbdb.NAT // If l3gatewayAnnotation.IPAddresses changed, we need to update the SNATs on the GR oldNATs := []*nbdb.NAT{} - if oldLogicalRouter != nil { - oldNATs, err = libovsdbops.GetRouterNATs(gw.nbClient, oldLogicalRouter) + var err error + if oldGWRouter != nil { + oldNATs, err = libovsdbops.GetRouterNATs(gw.nbClient, oldGWRouter) if err != nil && errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("unable to get NAT entries for router on node %s: %w", nodeName, err) + return fmt.Errorf("unable to get NAT entries for router %s: %w", oldGWRouter.Name, err) } } @@ -719,7 +688,7 @@ func (gw *GatewayManager) GatewayInit( for _, externalIP := range externalIPs { oldExternalIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6(externalIP), oldExtIPs) if err != nil { - return fmt.Errorf("failed to update GW SNAT rule for pods on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to update GW SNAT rule for pods on router %s error: %v", gw.gwRouterName, err) } if externalIP.String() == oldExternalIP.String() { // no external ip change, skip @@ -741,7 +710,7 @@ func (gw *GatewayManager) GatewayInit( joinIP, err := util.MatchFirstIPFamily(utilnet.IsIPv6(parsedLogicalIP), gwLRPIPs) if err != nil { return fmt.Errorf("failed to find valid IP family match for join subnet IP: %s on "+ - "gateway router: %s, provided IPs: %#v", parsedLogicalIP, gatewayRouter, gwLRPIPs) + "gateway router: %s, provided IPs: %#v", parsedLogicalIP, gw.gwRouterName, gwLRPIPs) } if nat.LogicalIP != joinIP.String() { // needs to be updated @@ -755,12 +724,16 @@ 
func (gw *GatewayManager) GatewayInit( } if len(natsToUpdate) > 0 { - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, natsToUpdate...) + err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, natsToUpdate...) if err != nil { - return fmt.Errorf("failed to update GW SNAT rule for pod on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to update GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } + return nil +} +func (gw *GatewayManager) updateGWRouterNAT(nodeName string, clusterIPSubnet []*net.IPNet, l3GatewayConfig *util.L3GatewayConfig, + externalIPs, gwLRPIPs []net.IP, gwRouter *nbdb.LogicalRouter) error { // REMOVEME(trozet) workaround - create join subnet SNAT to handle ICMP needs frag return var extIDs map[string]string if gw.netInfo.IsSecondary() { @@ -774,7 +747,7 @@ func (gw *GatewayManager) GatewayInit( externalIP, err := util.MatchIPFamily(utilnet.IsIPv6(gwLRPIP), externalIPs) if err != nil { return fmt.Errorf("failed to find valid external IP family match for join subnet IP: %s on "+ - "gateway router: %s", gwLRPIP, gatewayRouter) + "gateway router: %s", gwLRPIP, gw.gwRouterName) } joinIPNet, err := util.GetIPNetFullMask(gwLRPIP.String()) if err != nil { @@ -783,45 +756,156 @@ func (gw *GatewayManager) GatewayInit( nat := libovsdbops.BuildSNAT(&externalIP[0], joinIPNet, "", extIDs) joinNATs = append(joinNATs, nat) } - err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, joinNATs...) + err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, joinNATs...) 
if err != nil { - return fmt.Errorf("failed to create SNAT rule for join subnet on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to create SNAT rule for join subnet on router %s error: %v", gw.gwRouterName, err) } nats := make([]*nbdb.NAT, 0, len(clusterIPSubnet)) var nat *nbdb.NAT - if (!config.Gateway.DisableSNATMultipleGWs || gw.netInfo.IsPrimaryNetwork()) && !gw.isRoutingAdvertised(nodeName) { + // DisableSNATMultipleGWs is only applicable to cluster default network and not to user defined networks. + // For user defined networks, we always add SNAT rules regardless of whether the network is advertised or not. + if !config.Gateway.DisableSNATMultipleGWs || gw.netInfo.IsPrimaryNetwork() { // Default SNAT rules. DisableSNATMultipleGWs=false in LGW (traffic egresses via mp0) always. // We are not checking for gateway mode to be shared explicitly to reduce topology differences. for _, entry := range clusterIPSubnet { externalIP, err := util.MatchIPFamily(utilnet.IsIPv6CIDR(entry), externalIPs) if err != nil { return fmt.Errorf("failed to create default SNAT rules for gateway router %s: %v", - gatewayRouter, err) + gw.gwRouterName, err) } - nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) + // Get the match for this specific subnet's IP family + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(entry) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(gw.nbClient, gw.netInfo, nodeName, gw.isRoutingAdvertised(nodeName), ipFamily) + if err != nil { + return fmt.Errorf("failed to get SNAT match for node %s for network %s: %w", nodeName, gw.netInfo.GetNetworkName(), err) + } + + nat = libovsdbops.BuildSNATWithMatch(&externalIP[0], entry, "", extIDs, snatMatch) nats = append(nats, nat) } - err := libovsdbops.CreateOrUpdateNATs(gw.nbClient, &logicalRouter, nats...) 
+ err = libovsdbops.CreateOrUpdateNATs(gw.nbClient, gwRouter, nats...) if err != nil { - return fmt.Errorf("failed to update SNAT rule for pod on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to update SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } else { // ensure we do not have any leftover SNAT entries after an upgrade for _, logicalSubnet := range clusterIPSubnet { - nat = libovsdbops.BuildSNATWithMatch(nil, logicalSubnet, "", extIDs, gw.netInfo.GetNetworkScopedClusterSubnetSNATMatch(nodeName)) + nat = libovsdbops.BuildSNAT(nil, logicalSubnet, "", extIDs) nats = append(nats, nat) } - err := libovsdbops.DeleteNATs(gw.nbClient, &logicalRouter, nats...) + err = libovsdbops.DeleteNATs(gw.nbClient, gwRouter, nats...) if err != nil { - return fmt.Errorf("failed to delete GW SNAT rule for pod on router %s error: %v", gatewayRouter, err) + return fmt.Errorf("failed to delete GW SNAT rule for pod on router %s error: %v", gw.gwRouterName, err) } } - if err := gw.cleanupStalePodSNATs(nodeName, l3GatewayConfig.IPAddresses, gwLRPIPs); err != nil { + if err = gw.cleanupStalePodSNATs(nodeName, l3GatewayConfig.IPAddresses, gwLRPIPs); err != nil { return fmt.Errorf("failed to sync stale SNATs on node %s: %v", nodeName, err) } + return nil +} + +// gatewayInit creates a gateway router for the local chassis. +// enableGatewayMTU enables options:gateway_mtu for gateway routers. 
+func (gw *GatewayManager) gatewayInit( + nodeName string, + gwConfig *GatewayConfig, + enableGatewayMTU bool, +) error { + + // If l3gatewayAnnotation.IPAddresses changed, we need to update the perPodSNATs, + // so let's save the old value before we update the router for later use + var oldExtIPs []net.IP + oldLogicalRouter, err := libovsdbops.GetLogicalRouter(gw.nbClient, + &nbdb.LogicalRouter{ + Name: gw.gwRouterName, + }) + if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed in retrieving %s, error: %v", gw.gwRouterName, err) + } + + if oldLogicalRouter != nil && oldLogicalRouter.ExternalIDs != nil { + if physicalIPs, ok := oldLogicalRouter.ExternalIDs["physical_ips"]; ok { + oldExternalIPs := strings.Split(physicalIPs, ",") + oldExtIPs = make([]net.IP, len(oldExternalIPs)) + for i, oldExternalIP := range oldExternalIPs { + cidr := oldExternalIP + util.GetIPFullMaskString(oldExternalIP) + ip, _, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("invalid cidr:%s error: %v", cidr, err) + } + oldExtIPs[i] = ip + } + } + } + + gwRouter, err := gw.createGWRouter(gwConfig.annoConfig, gwConfig.gwLRPJoinIPs) + if err != nil { + return err + } + + if err = gw.createGWRouterPeerPort(nodeName); err != nil { + return err + } + + gwLRPIPs, err := gw.createGWRouterPort(gwConfig.hostSubnets, gwConfig.gwLRPJoinIPs, enableGatewayMTU, gwRouter) + if err != nil { + return err + } + + if err := gw.addExternalSwitch("", + gwConfig.annoConfig.InterfaceID, + gw.gwRouterName, + gwConfig.annoConfig.MACAddress.String(), + physNetName(gw.netInfo), + gwConfig.annoConfig.IPAddresses, + gwConfig.annoConfig.VLANID); err != nil { + return err + } + + if gwConfig.annoConfig.EgressGWInterfaceID != "" { + if err := gw.addExternalSwitch(types.EgressGWSwitchPrefix, + gwConfig.annoConfig.EgressGWInterfaceID, + gw.gwRouterName, + gwConfig.annoConfig.EgressGWMACAddress.String(), + types.PhysicalNetworkExGwName, + 
gwConfig.annoConfig.EgressGWIPAddresses, + nil); err != nil { + return err + } + } + + // Remove stale OVN resources with any old masquerade IP + if err := deleteStaleMasqueradeResources(gw.nbClient, gw.gwRouterName, nodeName, gw.watchFactory); err != nil { + return fmt.Errorf("failed to remove stale masquerade resources from northbound database: %w", err) + } + + if err := gateway.CreateDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo); err != nil { + return err + } + + externalRouterPort := types.GWRouterToExtSwitchPrefix + gw.gwRouterName + if err = gw.updateGWRouterStaticRoutes(gwConfig.clusterSubnets, gwConfig.ovnClusterLRPToJoinIfAddrs, gwConfig.annoConfig, externalRouterPort, + gwRouter); err != nil { + return err + } + + if err = gw.updateClusterRouterStaticRoutes(gwConfig.hostSubnets, gwLRPIPs); err != nil { + return err + } + + if err = gw.syncNATsForGRIPChange(gwConfig.externalIPs, oldExtIPs, gwLRPIPs, gwRouter, oldLogicalRouter); err != nil { + return err + } + + if err = gw.updateGWRouterNAT(nodeName, gwConfig.clusterSubnets, gwConfig.annoConfig, gwConfig.externalIPs, gwLRPIPs, gwRouter); err != nil { + return err + } // recording gateway mode metrics here after gateway setup is done metrics.RecordEgressRoutingViaHost() @@ -829,6 +913,37 @@ func (gw *GatewayManager) GatewayInit( return nil } +// GetNetworkScopedClusterSubnetSNATMatch returns the match for the SNAT rule for the cluster default network +// and the match for the SNAT rule for the L3/L2 user defined network. +// If the network is not advertised: +// - For Layer2 topology, the match is the output port of the GR to the join switch since in L2 there is only 1 router but two cSNATs. +// - For Layer3 topology, the match is empty. +// If the network is advertised: +// - For Layer2 topology, the match is the output port of the GR to the join switch and the destination must be a nodeIP in the cluster. 
+// - For Layer3 topology, the match is the destination must be a nodeIP in the cluster. +func GetNetworkScopedClusterSubnetSNATMatch(nbClient libovsdbclient.Client, netInfo util.NetInfo, nodeName string, isNetworkAdvertised bool, ipFamily utilnet.IPFamily) (string, error) { + if !isNetworkAdvertised { + if netInfo.TopologyType() != types.Layer2Topology { + return "", nil + } + return fmt.Sprintf("outport == %q", types.GWRouterToExtSwitchPrefix+netInfo.GetNetworkScopedGWRouterName(nodeName)), nil + } else { + // if the network is advertised, we need to ensure that the SNAT exists with the correct conditional destination match + dbIDs := getEgressIPAddrSetDbIDs(NodeIPAddrSetName, types.DefaultNetworkName, DefaultNetworkControllerName) + addressSetFactory := addressset.NewOvnAddressSetFactory(nbClient, config.IPv4Mode, config.IPv6Mode) + addrSet, err := addressSetFactory.GetAddressSet(dbIDs) + if err != nil { + return "", fmt.Errorf("cannot ensure that addressSet %s exists %v", NodeIPAddrSetName, err) + } + ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS := addrSet.GetASHashNames() + destinationMatch := getClusterNodesDestinationBasedSNATMatch(ipv4ClusterNodeIPAS, ipv6ClusterNodeIPAS, ipFamily) + if netInfo.TopologyType() != types.Layer2Topology { + return destinationMatch, nil + } + return fmt.Sprintf("outport == %q && (%s)", types.GWRouterToExtSwitchPrefix+netInfo.GetNetworkScopedGWRouterName(nodeName), destinationMatch), nil + } +} + // addExternalSwitch creates a switch connected to the external bridge and connects it to // the gateway router func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, macAddress, physNetworkName string, ipAddresses []*net.IPNet, vlanID *uint) error { @@ -897,7 +1012,7 @@ func (gw *GatewayManager) addExternalSwitch(prefix, interfaceID, gatewayRouter, Name: externalSwitchPortToRouter, Type: "router", Options: map[string]string{ - "router-port": externalRouterPort, + libovsdbops.RouterPort: externalRouterPort, // This 
option will program OVN to start sending GARPs for all external IPS // that the logical switch port has been configured to use. This is @@ -1080,17 +1195,8 @@ func (gw *GatewayManager) Cleanup() error { // Get the gateway router port's IP address (connected to join switch) var nextHops []net.IP - gwRouterToJoinSwitchPortName := types.GWRouterToJoinSwitchPrefix + gw.gwRouterName - portName := types.JoinSwitchToGWRouterPrefix + gw.gwRouterName - - // In Layer2 networks there is no join switch and the gw.joinSwitchName points to the cluster switch. - // Ensure that the ports are named appropriately, this is important for the logical router policies - // created for local node access. - // TODO(kyrtapz): Clean this up for clarity as part of https://github.com/ovn-org/ovn-kubernetes/issues/4689 - if gw.netInfo.TopologyType() == types.Layer2Topology { - gwRouterToJoinSwitchPortName = types.RouterToSwitchPrefix + gw.joinSwitchName - portName = types.SwitchToRouterPrefix + gw.joinSwitchName - } + gwRouterToJoinSwitchPortName := gw.getGWRouterPortName() + portName := gw.getGWRouterPeerPortName() gwIPAddrs, err := libovsdbutil.GetLRPAddrs(gw.nbClient, gwRouterToJoinSwitchPortName) if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { @@ -1116,16 +1222,6 @@ func (gw *GatewayManager) Cleanup() error { return fmt.Errorf("failed to delete logical switch port %s from switch %s: %w", portName, sw.Name, err) } - // Remove the logical router port on the gateway router that connects to the join switch - logicalRouter := nbdb.LogicalRouter{Name: gw.gwRouterName} - logicalRouterPort := nbdb.LogicalRouterPort{ - Name: gwRouterToJoinSwitchPortName, - } - err = libovsdbops.DeleteLogicalRouterPorts(gw.nbClient, &logicalRouter, &logicalRouterPort) - if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { - return fmt.Errorf("failed to delete port %s on router %s: %w", logicalRouterPort.Name, gw.gwRouterName, err) - } - // Remove the static mac bindings of the gateway router 
err = gateway.DeleteDummyGWMacBindings(gw.nbClient, gw.gwRouterName, gw.netInfo) if err != nil { @@ -1133,6 +1229,7 @@ func (gw *GatewayManager) Cleanup() error { } // Remove the gateway router associated with nodeName + logicalRouter := nbdb.LogicalRouter{Name: gw.gwRouterName} err = libovsdbops.DeleteLogicalRouter(gw.nbClient, &logicalRouter) if err != nil && !errors.Is(err, libovsdbclient.ErrNotFound) { return fmt.Errorf("failed to delete gateway router %s: %w", gw.gwRouterName, err) @@ -1271,26 +1368,26 @@ func (gw *GatewayManager) isRoutingAdvertised(node string) bool { return util.IsPodNetworkAdvertisedAtNode(gw.netInfo, node) } -func (gw *GatewayManager) syncGatewayLogicalNetwork( +// SyncGateway ensures a node's gateway router is configured according to the L3 config and host subnets +func (gw *GatewayManager) SyncGateway( node *corev1.Node, - l3GatewayConfig *util.L3GatewayConfig, - hostSubnets []*net.IPNet, - hostAddrs []string, - clusterSubnets []*net.IPNet, - grLRPJoinIPs []*net.IPNet, - ovnClusterLRPToJoinIfAddrs []*net.IPNet, - externalIPs []net.IP, + gwConfig *GatewayConfig, ) error { + if gwConfig.annoConfig.Mode == config.GatewayModeDisabled { + if err := gw.Cleanup(); err != nil { + return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) + } + return nil + } + if gwConfig.hostSubnets == nil { + return nil + } + enableGatewayMTU := util.ParseNodeGatewayMTUSupport(node) - err := gw.GatewayInit( + err := gw.gatewayInit( node.Name, - clusterSubnets, - hostSubnets, - l3GatewayConfig, - grLRPJoinIPs, // the joinIP allocated to this node's GR for this controller's network - ovnClusterLRPToJoinIfAddrs, - externalIPs, + gwConfig, enableGatewayMTU, ) if err != nil { @@ -1301,16 +1398,16 @@ func (gw *GatewayManager) syncGatewayLogicalNetwork( if gw.clusterRouterName == "" { routerName = gw.gwRouterName } - for _, subnet := range hostSubnets { + for _, subnet := range gwConfig.hostSubnets { mgmtIfAddr := 
util.GetNodeManagementIfAddr(subnet) if mgmtIfAddr == nil { return fmt.Errorf("management interface address not found for subnet %q on network %q", subnet, gw.netInfo.GetNetworkName()) } - l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), l3GatewayConfig.IPAddresses) + l3GatewayConfigIP, err := util.MatchFirstIPNetFamily(utilnet.IsIPv6(mgmtIfAddr.IP), gwConfig.annoConfig.IPAddresses) if err != nil { return fmt.Errorf("failed to extract the gateway IP addr for network %q: %v", gw.netInfo.GetNetworkName(), err) } - relevantHostIPs, err := util.MatchAllIPStringFamily(utilnet.IsIPv6(mgmtIfAddr.IP), hostAddrs) + relevantHostIPs, err := util.MatchAllIPStringFamily(utilnet.IsIPv6(mgmtIfAddr.IP), gwConfig.hostAddrs) if err != nil && err != util.ErrorNoIP { return fmt.Errorf("failed to extract the host IP addrs for network %q: %v", gw.netInfo.GetNetworkName(), err) } @@ -1329,37 +1426,6 @@ func (gw *GatewayManager) syncGatewayLogicalNetwork( return nil } -// syncNodeGateway ensures a node's gateway router is configured according to the L3 config and host subnets -func (gw *GatewayManager) syncNodeGateway( - node *corev1.Node, - l3GatewayConfig *util.L3GatewayConfig, - hostSubnets []*net.IPNet, - hostAddrs []string, - clusterSubnets, grLRPJoinIPs []*net.IPNet, - joinSwitchIPs []*net.IPNet, - externalIPs []net.IP, -) error { - if l3GatewayConfig.Mode == config.GatewayModeDisabled { - if err := gw.Cleanup(); err != nil { - return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) - } - } else if hostSubnets != nil { - if err := gw.syncGatewayLogicalNetwork( - node, - l3GatewayConfig, - hostSubnets, - hostAddrs, - clusterSubnets, - grLRPJoinIPs, // the joinIP allocated to this node for this controller's network - joinSwitchIPs, // the .1 of this controller's global joinSubnet - externalIPs, - ); err != nil { - return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) - } - } - return nil -} - func 
physNetName(netInfo util.NetInfo) string { if netInfo.IsDefault() || netInfo.IsPrimaryNetwork() { return types.PhysicalNetworkName diff --git a/go-controller/pkg/ovn/gateway/gateway.go b/go-controller/pkg/ovn/gateway/gateway.go index c6e10ab4a9..f716528810 100644 --- a/go-controller/pkg/ovn/gateway/gateway.go +++ b/go-controller/pkg/ovn/gateway/gateway.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/gateway_test.go b/go-controller/pkg/ovn/gateway_test.go index da48869991..893d17ad09 100644 --- a/go-controller/pkg/ovn/gateway_test.go +++ b/go-controller/pkg/ovn/gateway_test.go @@ -14,6 +14,7 @@ import ( utilnet "k8s.io/utils/net" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" @@ -43,7 +44,7 @@ func generateAdvertisedUDNIsolationExpectedNB(testData []libovsdbtest.TestData, passMatches = append(passMatches, fmt.Sprintf("(%s.src == %s && %s.dst == %s)", ipPrefix, subnet, ipPrefix, subnet)) } - passACL := libovsdbutil.BuildACL( + passACL := libovsdbutil.BuildACLWithDefaultTier( GetAdvertisedNetworkSubnetsPassACLdbIDs(DefaultNetworkControllerName, networkName, networkID), types.AdvertisedNetworkPassPriority, strings.Join(passMatches, " || "), @@ -64,6 +65,15 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN expectedNodeSwitch *nbdb.LogicalSwitch, nodeName string, clusterIPSubnets []*net.IPNet, hostSubnets []*net.IPNet, l3GatewayConfig 
*util.L3GatewayConfig, joinLRPIPs, defLRPIPs []*net.IPNet, skipSnat bool, nodeMgmtPortIP, gatewayMTU string) []libovsdbtest.TestData { + return generateGatewayInitExpectedNBWithPodNetworkAdvertised(testData, expectedOVNClusterRouter, expectedNodeSwitch, + nodeName, clusterIPSubnets, hostSubnets, l3GatewayConfig, joinLRPIPs, defLRPIPs, skipSnat, nodeMgmtPortIP, + gatewayMTU, false) // Default to no pod network advertised +} + +func generateGatewayInitExpectedNBWithPodNetworkAdvertised(testData []libovsdbtest.TestData, expectedOVNClusterRouter *nbdb.LogicalRouter, + expectedNodeSwitch *nbdb.LogicalSwitch, nodeName string, clusterIPSubnets []*net.IPNet, hostSubnets []*net.IPNet, + l3GatewayConfig *util.L3GatewayConfig, joinLRPIPs, defLRPIPs []*net.IPNet, skipSnat bool, nodeMgmtPortIP, + gatewayMTU string, isPodNetworkAdvertised bool) []libovsdbtest.TestData { GRName := "GR_" + nodeName gwSwitchPort := types.JoinSwitchToGWRouterPrefix + GRName @@ -87,7 +97,7 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN var options map[string]string if gatewayMTU != "" { options = map[string]string{ - "gateway_mtu": gatewayMTU, + libovsdbops.GatewayMTU: gatewayMTU, } } testData = append(testData, &nbdb.LogicalRouterPort{ @@ -213,6 +223,16 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN }, Networks: networks, }) + var egressNodeIPsASv4, egressNodeIPsASv6 *nbdb.AddressSet + if config.OVNKubernetesFeature.EnableEgressIP { + egressNodeIPsASv4, egressNodeIPsASv6 = buildEgressIPNodeAddressSets(physicalIPs) + if config.IPv4Mode { + testData = append(testData, egressNodeIPsASv4) + } + if config.IPv6Mode { + testData = append(testData, egressNodeIPsASv6) + } + } natUUIDs := make([]string, 0, len(clusterIPSubnets)) if !skipSnat { @@ -220,13 +240,30 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN natUUID := fmt.Sprintf("nat-%d-UUID", i) natUUIDs = append(natUUIDs, natUUID) physicalIP, _ 
:= util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(subnet), l3GatewayConfig.IPAddresses) - testData = append(testData, &nbdb.NAT{ + nat := nbdb.NAT{ UUID: natUUID, ExternalIP: physicalIP.IP.String(), LogicalIP: subnet.String(), Options: map[string]string{"stateless": "false"}, Type: nbdb.NATTypeSNAT, - }) + } + if config.Gateway.Mode != config.GatewayModeDisabled { + nat.ExternalPortRange = config.DefaultEphemeralPortRange + } + if isPodNetworkAdvertised { + // IPv6 pod network + if utilnet.IsIPv6CIDR(subnet) { + if egressNodeIPsASv6 != nil { + nat.Match = fmt.Sprintf("ip6.dst == $%s", egressNodeIPsASv6.Name) + } + } else { + // IPv4 pod network + if egressNodeIPsASv4 != nil { + nat.Match = fmt.Sprintf("ip4.dst == $%s", egressNodeIPsASv4.Name) + } + } + } + testData = append(testData, &nat) } } @@ -234,13 +271,17 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN natUUID := fmt.Sprintf("nat-join-%d-UUID", i) natUUIDs = append(natUUIDs, natUUID) joinLRPIP, _ := util.MatchFirstIPNetFamily(utilnet.IsIPv6CIDR(physicalIP), joinLRPIPs) - testData = append(testData, &nbdb.NAT{ + nat := nbdb.NAT{ UUID: natUUID, ExternalIP: physicalIP.IP.String(), LogicalIP: joinLRPIP.IP.String(), Options: map[string]string{"stateless": "false"}, Type: nbdb.NATTypeSNAT, - }) + } + if config.Gateway.Mode != config.GatewayModeDisabled { + nat.ExternalPortRange = config.DefaultEphemeralPortRange + } + testData = append(testData, &nat) } testData = append(testData, &nbdb.MeterBand{ @@ -336,7 +377,7 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN Type: "router", Addresses: []string{"router"}, Options: map[string]string{ - "router-port": gwRouterPort, + libovsdbops.RouterPort: gwRouterPort, }, }, &nbdb.LogicalSwitchPort{ @@ -344,7 +385,7 @@ func generateGatewayInitExpectedNB(testData []libovsdbtest.TestData, expectedOVN Name: externalSwitchPortToRouter, Type: "router", Options: map[string]string{ - "router-port": 
externalRouterPort, + libovsdbops.RouterPort: externalRouterPort, "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", }, @@ -394,6 +435,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.Context("Gateway Creation Operations Shared Gateway Mode", func() { ginkgo.BeforeEach(func() { config.Gateway.Mode = config.GatewayModeShared + config.Gateway.EphemeralPortRange = config.DefaultEphemeralPortRange }) ginkgo.It("creates an IPv4 gateway in OVN", func() { @@ -454,19 +496,23 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -567,19 +613,23 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = 
newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -686,19 +736,23 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -771,19 +825,23 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { IPAddresses: ovntest.MustParseIPNets("169.255.33.2/24"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -849,7 +907,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } - + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -861,14 +927,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { mgmtPortIP := "" // Disable option:gateway_mtu. - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, false, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -880,14 +941,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { // Enable option:gateway_mtu. 
expectedOVNClusterRouter.StaticRoutes = []string{} - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -947,6 +1003,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -958,14 +1023,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { // We don't set up the Allow from mgmt port ACL here mgmtPortIP := "" - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, false, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -978,15 +1038,11 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.By("modifying the node join IP") oldJoinLRPIPs := joinLRPIPs joinLRPIPs = ovntest.MustParseIPNets("100.64.0.99/16") + gwConfig.gwLRPJoinIPs = joinLRPIPs expectedOVNClusterRouter.StaticRoutes = []string{} - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1057,19 +1113,23 @@ var _ = 
ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("fd99::1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1133,6 +1193,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { IPAddresses: ovntest.MustParseIPNets("fd99::2/64"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) @@ -1141,14 +1210,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { config.IPv4Mode = false config.IPv6Mode = true - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1216,19 +1280,23 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1", "fd99::1"), NodePortEnable: true, } + gwConfig := 
&GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1293,20 +1361,24 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1403,20 +1475,24 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: 
extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1441,6 +1517,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { ginkgo.BeforeEach(func() { config.Gateway.Mode = config.GatewayModeLocal config.IPv6Mode = false + config.Gateway.EphemeralPortRange = config.DefaultEphemeralPortRange }) ginkgo.It("creates a dual-stack gateway in OVN", func() { @@ -1515,19 +1592,23 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1", "fd99::1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1601,20 +1682,24 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + 
hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error fakeOvn.controller.defaultCOPPUUID, err = EnsureDefaultCOPP(fakeOvn.nbClient) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1682,7 +1767,7 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { &nbdb.LogicalRouterPort{ UUID: types.GWRouterToExtSwitchPrefix + types.GWRouterPrefix + nodeName + "-UUID", Name: types.GWRouterToExtSwitchPrefix + types.GWRouterPrefix + nodeName, - Options: map[string]string{"gateway_mtu": "1400"}, + Options: map[string]string{libovsdbops.GatewayMTU: "1400"}, }, expectedGR, expectedOVNClusterRouter, @@ -1716,6 +1801,15 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { NextHops: ovntest.MustParseIPs("169.255.33.1"), NodePortEnable: true, } + gwConfig := &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterIPSubnets, + gwLRPJoinIPs: joinLRPIPs, + hostAddrs: nil, + externalIPs: extractExternalIPs(l3GatewayConfig), + ovnClusterLRPToJoinIfAddrs: defLRPIPs, + } config.Gateway.DisableSNATMultipleGWs = true var err error @@ -1723,14 +1817,9 @@ var _ = ginkgo.Describe("Gateway Init Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) expectedOVNClusterRouter.StaticRoutes = []string{} - err = newGatewayManager(fakeOvn, nodeName).GatewayInit( + err = newGatewayManager(fakeOvn, nodeName).gatewayInit( nodeName, - clusterIPSubnets, - hostSubnets, - l3GatewayConfig, - joinLRPIPs, - defLRPIPs, - 
extractExternalIPs(l3GatewayConfig), + gwConfig, true, ) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go index e2866ba946..4f61101282 100644 --- a/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go +++ b/go-controller/pkg/ovn/gatewayrouter/policybasedroutes.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" diff --git a/go-controller/pkg/ovn/gress_policy.go b/go-controller/pkg/ovn/gress_policy.go index c8445e6ed5..ad20fadfb3 100644 --- a/go-controller/pkg/ovn/gress_policy.go +++ b/go-controller/pkg/ovn/gress_policy.go @@ -209,6 +209,10 @@ func (gp *gressPolicy) addNamespaceAddressSet(name string, asf addressset.Addres return false, fmt.Errorf("cannot add peer namespace %s: failed to get address set: %v", name, err) } v4HashName, v6HashName := as.GetASHashNames() + if v4HashName == "" && v6HashName == "" { + // This would happen when a namespace is not yet reconciled with UDN network. 
+ return false, fmt.Errorf("cannot add peer namespace %s: address set has empty hashed name", name) + } v4HashName = "$" + v4HashName v6HashName = "$" + v6HashName @@ -234,6 +238,9 @@ func (gp *gressPolicy) addNamespaceAddressSet(name string, asf addressset.Addres func (gp *gressPolicy) delNamespaceAddressSet(name string) bool { dbIDs := getNamespaceAddrSetDbIDs(name, gp.controllerName) v4HashName, v6HashName := addressset.GetHashNamesForAS(dbIDs) + if v4HashName == "" && v6HashName == "" { + return false + } v4HashName = "$" + v4HashName v6HashName = "$" + v6HashName @@ -281,7 +288,7 @@ func (gp *gressPolicy) buildLocalPodACLs(portGroupName string, aclLogging *libov ipBlockMatches := gp.getMatchFromIPBlock(lportMatch, l4Match) for ipBlockIdx, ipBlockMatch := range ipBlockMatches { aclIDs := gp.getNetpolACLDbIDs(ipBlockIdx, protocol) - acl := libovsdbutil.BuildACL(aclIDs, types.DefaultAllowPriority, ipBlockMatch, action, + acl := libovsdbutil.BuildACLWithDefaultTier(aclIDs, types.DefaultAllowPriority, ipBlockMatch, action, aclLogging, gp.aclPipeline) createdACLs = append(createdACLs, acl) } @@ -302,7 +309,7 @@ func (gp *gressPolicy) buildLocalPodACLs(portGroupName string, aclLogging *libov addrSetMatch = fmt.Sprintf("%s && %s && %s", l3Match, l4Match, lportMatch) } aclIDs := gp.getNetpolACLDbIDs(emptyIdx, protocol) - acl := libovsdbutil.BuildACL(aclIDs, types.DefaultAllowPriority, addrSetMatch, action, + acl := libovsdbutil.BuildACLWithDefaultTier(aclIDs, types.DefaultAllowPriority, addrSetMatch, action, aclLogging, gp.aclPipeline) if l3Match == "" { // if l3Match is empty, then no address sets are selected for a given gressPolicy. 
diff --git a/go-controller/pkg/ovn/hybrid.go b/go-controller/pkg/ovn/hybrid.go index f7debe4ec8..41f98075f1 100644 --- a/go-controller/pkg/ovn/hybrid.go +++ b/go-controller/pkg/ovn/hybrid.go @@ -12,7 +12,7 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" houtil "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/util" @@ -137,12 +137,18 @@ func (oc *DefaultNetworkController) handleHybridOverlayPort(node *corev1.Node, a } func (oc *DefaultNetworkController) deleteHybridOverlayPort(node *corev1.Node) error { - klog.Infof("Removing node %s hybrid overlay port", node.Name) portName := util.GetHybridOverlayPortName(node.Name) lsp := nbdb.LogicalSwitchPort{Name: portName} - sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(node.Name)} - if err := libovsdbops.DeleteLogicalSwitchPorts(oc.nbClient, &sw, &lsp); err != nil { - return err + if _, err := libovsdbops.GetLogicalSwitchPort(oc.nbClient, &lsp); err != nil { + if !errors.Is(err, libovsdbclient.ErrNotFound) { + return fmt.Errorf("failed to get logical switch port for hybrid overlay port %s, err: %v", portName, err) + } + } else { + sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(node.Name)} + klog.Infof("Removing node %s hybrid overlay port", node.Name) + if err := libovsdbops.DeleteLogicalSwitchPorts(oc.nbClient, &sw, &lsp); err != nil { + return err + } } if err := oc.removeHybridLRPolicySharedGW(node); err != nil { return err @@ -171,7 +177,7 @@ func (oc *DefaultNetworkController) setupHybridLRPolicySharedGw(nodeSubnets []*n // In cases of OpenShift SDN live migration, where config.HybridOverlay.ClusterSubnets is not provided, we // use the host subnets allocated by OpenShiftSDN as the hybrid-overlay-node-subnet and set up hybrid // overlay routes/policies to these subnets. 
- nodes, err := oc.kube.GetNodes() + nodes, err := oc.watchFactory.GetNodes() if err != nil { return err } @@ -407,7 +413,7 @@ func (oc *DefaultNetworkController) removeRoutesToHONodeSubnet(nodeName string, } // Delete routes to HO subnet from GRs - nodes, err := oc.kube.GetNodes() + nodes, err := oc.watchFactory.GetNodes() if err != nil { return err } diff --git a/go-controller/pkg/ovn/hybrid_test.go b/go-controller/pkg/ovn/hybrid_test.go index a65294f345..4b01354429 100644 --- a/go-controller/pkg/ovn/hybrid_test.go +++ b/go-controller/pkg/ovn/hybrid_test.go @@ -20,7 +20,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/record" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" cm "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/clustermanager" @@ -335,10 +335,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -429,7 +425,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRIP, nodeHOIP)) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) var 
clusterSubnets []*net.IPNet @@ -617,10 +613,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -705,7 +697,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { setupCOPP := true setupClusterController(clusterController, setupCOPP) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) //assuming all the pods have finished processing @@ -826,10 +818,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -912,7 +900,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, 
[]*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) var clusterSubnets []*net.IPNet @@ -1124,10 +1112,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode1.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1214,8 +1198,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) //ensure hybrid overlay elements have been added - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(func() ([]*nbdb.LogicalRouterStaticRoute, error) { @@ -1337,10 +1320,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) f, err = factory.NewMasterWatchFactory(fakeClient.GetMasterClientset()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1419,8 
+1398,7 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { return updatedNode.Annotations, nil }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) + err = clusterController.syncNodeGateway(updatedNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // switch the node to a ovn node @@ -1475,229 +1453,6 @@ var _ = ginkgo.Describe("Hybrid SDN Master Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - ginkgo.It("cleans up a Linux node when the OVN hostsubnet annotation is removed", func() { - app.Action = func(ctx *cli.Context) error { - const ( - nodeHOMAC string = "0a:58:0a:01:01:03" - hoSubnet string = "11.1.0.0/16" - nodeHOIP string = "10.1.1.3" - ) - node1 := tNode{ - Name: "node1", - NodeIP: "1.2.3.4", - NodeLRPMAC: "0a:58:0a:01:01:01", - LrpIP: "100.64.0.2", - DrLrpIP: "100.64.0.1", - PhysicalBridgeMAC: "11:22:33:44:55:66", - SystemID: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac6", - NodeSubnet: "10.1.1.0/24", - GWRouter: types.GWRouterPrefix + "node1", - GatewayRouterIPMask: "172.16.16.2/24", - GatewayRouterIP: "172.16.16.2", - GatewayRouterNextHop: "172.16.16.1", - PhysicalBridgeName: "br-eth0", - NodeGWIP: "10.1.1.1/24", - NodeMgmtPortIP: "10.1.1.2", - //NodeMgmtPortMAC: "0a:58:0a:01:01:02", - NodeMgmtPortMAC: "0a:58:64:40:00:03", - DnatSnatIP: "169.254.0.1", - } - testNode := node1.k8sNode("2") - - kubeFakeClient := fake.NewSimpleClientset(&corev1.NodeList{ - Items: []corev1.Node{testNode}, - }) - egressFirewallFakeClient := &egressfirewallfake.Clientset{} - egressIPFakeClient := &egressipfake.Clientset{} - egressQoSFakeClient := &egressqosfake.Clientset{} - egressServiceFakeClient := &egressservicefake.Clientset{} - fakeClient := &util.OVNMasterClientset{ - KubeClient: kubeFakeClient, - EgressIPClient: 
egressIPFakeClient, - EgressFirewallClient: egressFirewallFakeClient, - EgressQoSClient: egressQoSFakeClient, - EgressServiceClient: egressServiceFakeClient, - } - - vlanID := 1024 - _, err := config.InitConfig(ctx, nil, nil) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - config.Kubernetes.HostNetworkNamespace = "" - nodeAnnotator := kube.NewNodeAnnotator(&kube.Kube{KClient: kubeFakeClient}, testNode.Name) - l3Config := node1.gatewayConfig(config.GatewayModeShared, uint(vlanID)) - err = util.SetL3GatewayConfig(nodeAnnotator, l3Config) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = util.UpdateNodeManagementPortMACAddresses(&testNode, nodeAnnotator, - ovntest.MustParseMAC(node1.NodeMgmtPortMAC), types.DefaultNetworkName) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = util.SetNodeHostSubnetAnnotation(nodeAnnotator, ovntest.MustParseIPNets(node1.NodeSubnet)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = util.SetNodeHostCIDRs(nodeAnnotator, sets.New(fmt.Sprintf("%s/24", node1.NodeIP))) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = nodeAnnotator.Run() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - hostAddrs, err := util.ParseNodeHostCIDRsDropNetMask(updatedNode) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - f, err = factory.NewMasterWatchFactory(fakeClient) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = f.Start() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - expectedClusterLBGroup := newLoadBalancerGroup(types.ClusterLBGroupName) - expectedSwitchLBGroup := newLoadBalancerGroup(types.ClusterSwitchLBGroupName) - expectedRouterLBGroup := newLoadBalancerGroup(types.ClusterRouterLBGroupName) - 
expectedOVNClusterRouter := newOVNClusterRouter() - ovnClusterRouterLRP := &nbdb.LogicalRouterPort{ - Name: types.GWRouterToJoinSwitchPrefix + types.OVNClusterRouter, - Networks: []string{"100.64.0.1/16"}, - UUID: types.GWRouterToJoinSwitchPrefix + types.OVNClusterRouter + "-UUID", - } - expectedOVNClusterRouter.Ports = []string{ovnClusterRouterLRP.UUID} - expectedNodeSwitch := node1.logicalSwitch([]string{expectedClusterLBGroup.UUID, expectedSwitchLBGroup.UUID}) - expectedClusterRouterPortGroup := newRouterPortGroup() - expectedClusterPortGroup := newClusterPortGroup() - - dbSetup := libovsdbtest.TestSetup{ - NBData: []libovsdbtest.TestData{ - newClusterJoinSwitch(), - expectedNodeSwitch, - ovnClusterRouterLRP, - expectedOVNClusterRouter, - expectedClusterRouterPortGroup, - expectedClusterPortGroup, - expectedClusterLBGroup, - expectedSwitchLBGroup, - expectedRouterLBGroup, - }, - } - var libovsdbOvnNBClient, libovsdbOvnSBClient libovsdbclient.Client - libovsdbOvnNBClient, libovsdbOvnSBClient, libovsdbCleanup, err = libovsdbtest.NewNBSBTestHarness(dbSetup) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - expectedDatabaseState := []libovsdbtest.TestData{ovnClusterRouterLRP} - expectedDatabaseState = addNodeLogicalFlows(expectedDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1) - - clusterController, err := NewOvnController( - fakeClient, - f, - stopChan, - nil, - networkmanager.Default().Interface(), - libovsdbOvnNBClient, - libovsdbOvnSBClient, - record.NewFakeRecorder(10), - wg, - nil, - NewPortCache(stopChan), - ) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - setupCOPP := true - setupClusterController(clusterController, setupCOPP) - - //assuming all the pods have finished processing - atomic.StoreUint32(&clusterController.allInitialPodsProcessed, 1) - // Let the real code run and ensure OVN database sync - gomega.Expect(clusterController.WatchNodes()).To(gomega.Succeed()) - 
- gomega.Eventually(func() (map[string]string, error) { - updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return updatedNode.Annotations, nil - }, 2).Should(gomega.HaveKeyWithValue(hotypes.HybridOverlayDRMAC, nodeHOMAC)) - - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = clusterController.syncDefaultGatewayLogicalNetwork(updatedNode, l3GatewayConfig, []*net.IPNet{subnet}, hostAddrs.UnsortedList()) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - var clusterSubnets []*net.IPNet - for _, clusterSubnet := range config.Default.ClusterSubnets { - clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) - } - - skipSnat := false - expectedDatabaseState = generateGatewayInitExpectedNB(expectedDatabaseState, expectedOVNClusterRouter, - expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3Config, - []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, skipSnat, - node1.NodeMgmtPortIP, "1400") - - hybridSubnetStaticRoute1, hybridLogicalRouterStaticRoute, hybridSubnetLRP1, hybridSubnetLRP2, hybridLogicalSwitchPort := setupHybridOverlayOVNObjects(node1, "", hoSubnet, nodeHOIP, nodeHOMAC) - - var node1LogicalRouter *nbdb.LogicalRouter - var basicNode1StaticRoutes []string - - for _, obj := range expectedDatabaseState { - if logicalRouter, ok := obj.(*nbdb.LogicalRouter); ok { - if logicalRouter.Name == "GR_node1" { - // keep a referance so that we can edit this object - node1LogicalRouter = logicalRouter - basicNode1StaticRoutes = logicalRouter.StaticRoutes - logicalRouter.StaticRoutes = append(logicalRouter.StaticRoutes, hybridLogicalRouterStaticRoute.UUID) - } - } - } - - // keep copies of these before appending hybrid overlay elements - basicExpectedNodeSwitchPorts := expectedNodeSwitch.Ports - basicExpectedOVNClusterRouterPolicies := expectedOVNClusterRouter.Policies - 
basicExpectedOVNClusterStaticRoutes := expectedOVNClusterRouter.StaticRoutes - - expectedNodeSwitch.Ports = append(expectedNodeSwitch.Ports, hybridLogicalSwitchPort.UUID) - expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, hybridSubnetLRP1.UUID, hybridSubnetLRP2.UUID) - expectedOVNClusterRouter.StaticRoutes = append(expectedOVNClusterRouter.StaticRoutes, hybridSubnetStaticRoute1.UUID) - - expectedDatabaseStateWithHybridNode := append([]libovsdbtest.TestData{hybridSubnetStaticRoute1, hybridSubnetLRP2, hybridSubnetLRP1, hybridLogicalSwitchPort, hybridLogicalRouterStaticRoute}, expectedDatabaseState...) - expectedStaticMACBinding := &nbdb.StaticMACBinding{ - UUID: "MAC-binding-HO-UUID", - IP: nodeHOIP, - LogicalPort: "rtos-node1", - MAC: nodeHOMAC, - OverrideDynamicMAC: true, - } - expectedDatabaseStateWithHybridNode = append(expectedDatabaseStateWithHybridNode, expectedStaticMACBinding) - gomega.Eventually(libovsdbOvnNBClient).Should(libovsdbtest.HaveData(expectedDatabaseStateWithHybridNode)) - - nodeAnnotator = kube.NewNodeAnnotator(&kube.Kube{KClient: kubeFakeClient}, testNode.Name) - util.DeleteNodeHostSubnetAnnotation(nodeAnnotator) - err = nodeAnnotator.Run() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - gomega.Eventually(func() (map[string]string, error) { - updatedNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return updatedNode.Annotations, nil - }, 5).ShouldNot(gomega.HaveKey(hotypes.HybridOverlayDRMAC)) - - // restore values from the non-hybrid versions - expectedNodeSwitch.Ports = basicExpectedNodeSwitchPorts - expectedOVNClusterRouter.Policies = basicExpectedOVNClusterRouterPolicies - expectedOVNClusterRouter.StaticRoutes = basicExpectedOVNClusterStaticRoutes - node1LogicalRouter.StaticRoutes = basicNode1StaticRoutes - - gomega.Eventually(libovsdbOvnNBClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) - - 
return nil - } - err := app.Run([]string{ - app.Name, - "-cluster-subnets=" + clusterCIDR, - "-gateway-mode=shared", - "-enable-hybrid-overlay", - "-hybrid-overlay-cluster-subnets=" + hybridOverlayClusterCIDR, - }) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - ginkgo.It("cleans up a Linux node that has hybridOverlay annotations and database objects when hybrid overlay is disabled", func() { app.Action = func(ctx *cli.Context) error { const ( diff --git a/go-controller/pkg/ovn/kubevirt_test.go b/go-controller/pkg/ovn/kubevirt_test.go index 1a7dd5fcae..342aad35d4 100644 --- a/go-controller/pkg/ovn/kubevirt_test.go +++ b/go-controller/pkg/ovn/kubevirt_test.go @@ -146,10 +146,6 @@ var _ = Describe("OVN Kubevirt Operations", func() { addressIPv6: "fd11::3", }, } - logicalSwitch *nbdb.LogicalSwitch - ovnClusterRouter *nbdb.LogicalRouter - logicalRouterPort *nbdb.LogicalRouterPort - migrationSourceLSRP, migrationTargetLSRP *nbdb.LogicalSwitchPort lrpIP = func(network string) string { return strings.Split(network, "/")[0] @@ -497,6 +493,12 @@ var _ = Describe("OVN Kubevirt Operations", func() { Context("during execution", func() { DescribeTable("reconcile migratable vm pods", func(t testData) { + var ( + logicalSwitch *nbdb.LogicalSwitch + ovnClusterRouter *nbdb.LogicalRouter + logicalRouterPort *nbdb.LogicalRouterPort + migrationSourceLSRP, migrationTargetLSRP *nbdb.LogicalSwitchPort + ) _, parsedClusterCIDRIPv4, err := net.ParseCIDR(clusterCIDRIPv4) Expect(err).ToNot(HaveOccurred()) @@ -540,8 +542,8 @@ var _ = Describe("OVN Kubevirt Operations", func() { UUID: ovntypes.SwitchToRouterPrefix + t.nodeName + "-UUID", Type: "router", Options: map[string]string{ - "router-port": logicalRouterPort.Name, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), + libovsdbops.RouterPort: logicalRouterPort.Name, + "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), }, } logicalSwitch = &nbdb.LogicalSwitch{ @@ -600,8 +602,8 @@ var _ = Describe("OVN Kubevirt Operations", func() { 
UUID: ovntypes.SwitchToRouterPrefix + t.migrationTarget.nodeName + "-UUID", Type: "router", Options: map[string]string{ - "router-port": migrationTargetLRP.Name, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), + libovsdbops.RouterPort: migrationTargetLRP.Name, + "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), }, } migrationTargetLS = &nbdb.LogicalSwitch{ @@ -665,6 +667,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Annotations: map[string]string{ "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf(`{"ipv4": %q, "ipv6": %q}`, nodeByName[node1].transitSwitchPortIPv4, nodeByName[node1].transitSwitchPortIPv6), "k8s.ovn.org/node-subnets": fmt.Sprintf(`{"default":[%q,%q]}`, nodeByName[node1].subnetIPv4, nodeByName[node1].subnetIPv6), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s", "ipv6":"%s"}}`, nodeByName[node1].lrpNetworkIPv4, nodeByName[node1].lrpNetworkIPv6), }, }, }, @@ -674,6 +677,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Annotations: map[string]string{ "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf(`{"ipv4": %q, "ipv6": %q}`, nodeByName[node2].transitSwitchPortIPv4, nodeByName[node2].transitSwitchPortIPv6), "k8s.ovn.org/node-subnets": fmt.Sprintf(`{"default":[%q,%q]}`, nodeByName[node2].subnetIPv4, nodeByName[node2].subnetIPv6), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s", "ipv6":"%s"}}`, nodeByName[node2].lrpNetworkIPv4, nodeByName[node2].lrpNetworkIPv6), }, }, }, @@ -683,6 +687,7 @@ var _ = Describe("OVN Kubevirt Operations", func() { Annotations: map[string]string{ "k8s.ovn.org/node-transit-switch-port-ifaddr": fmt.Sprintf(`{"ipv4": %q, "ipv6": %q}`, nodeByName[node3].transitSwitchPortIPv4, nodeByName[node3].transitSwitchPortIPv6), "k8s.ovn.org/node-subnets": fmt.Sprintf(`{"default":[%q,%q]}`, nodeByName[node3].subnetIPv4, nodeByName[node3].subnetIPv6), + util.OVNNodeGRLRPAddrs: fmt.Sprintf(`{"default":{"ipv4":"%s", "ipv6":"%s"}}`, nodeByName[node3].lrpNetworkIPv4, 
nodeByName[node3].lrpNetworkIPv6), }, }, }, diff --git a/go-controller/pkg/ovn/master.go b/go-controller/pkg/ovn/master.go index 26eb1277fe..b5394c7ffe 100644 --- a/go-controller/pkg/ovn/master.go +++ b/go-controller/pkg/ovn/master.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" houtil "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/util" @@ -33,6 +33,16 @@ const ( OvnNodeAnnotationRetryTimeout = 1 * time.Second ) +type GatewayConfig struct { + annoConfig *util.L3GatewayConfig + hostSubnets []*net.IPNet + clusterSubnets []*net.IPNet + gwLRPJoinIPs []*net.IPNet + hostAddrs []string + externalIPs []net.IP + ovnClusterLRPToJoinIfAddrs []*net.IPNet +} + // SetupMaster creates the central router and load-balancers for the network func (oc *DefaultNetworkController) SetupMaster() error { // Create default Control Plane Protection (COPP) entry for routers @@ -82,17 +92,35 @@ func (oc *DefaultNetworkController) syncNodeManagementPortDefault(node *corev1.N return err } -func (oc *DefaultNetworkController) syncDefaultGatewayLogicalNetwork( - node *corev1.Node, - l3GatewayConfig *util.L3GatewayConfig, - hostSubnets []*net.IPNet, - hostAddrs []string, -) error { +func (oc *DefaultNetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { + l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) + if err != nil { + return nil, err + } + + externalIPs := make([]net.IP, len(l3GatewayConfig.IPAddresses)) + for i, ip := range l3GatewayConfig.IPAddresses { + externalIPs[i] = ip.IP + } + + var hostAddrs []string + if config.Gateway.Mode == config.GatewayModeShared { + hostAddrs, err = util.GetNodeHostAddrs(node) + if err != nil && !util.IsAnnotationNotSetError(err) { + return nil, fmt.Errorf("failed to 
get host CIDRs for node: %s: %v", node.Name, err) + } + } + var clusterSubnets []*net.IPNet for _, clusterSubnet := range config.Default.ClusterSubnets { clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) } + hostSubnets, err := util.ParseNodeHostSubnetAnnotation(node, oc.GetNetworkName()) + if err != nil { + return nil, err + } + gwLRPIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, oc.GetNetworkName()) if err != nil { if util.IsAnnotationNotSetError(err) { @@ -101,26 +129,20 @@ func (oc *DefaultNetworkController) syncDefaultGatewayLogicalNetwork( var err1 error gwLRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) if err1 != nil { - return fmt.Errorf("failed to get join switch port IP address for node %s: %v/%v", node.Name, err, err1) + return nil, fmt.Errorf("failed to get join switch port IP address for node %s: %v/%v", node.Name, err, err1) } } } - externalIPs := make([]net.IP, len(l3GatewayConfig.IPAddresses)) - for i, ip := range l3GatewayConfig.IPAddresses { - externalIPs[i] = ip.IP - } - - return oc.newGatewayManager(node.Name).syncGatewayLogicalNetwork( - node, - l3GatewayConfig, - hostSubnets, - hostAddrs, - clusterSubnets, - gwLRPIPs, - oc.ovnClusterLRPToJoinIfAddrs, - externalIPs, - ) + return &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterSubnets, + gwLRPJoinIPs: gwLRPIPs, + hostAddrs: hostAddrs, + externalIPs: externalIPs, + ovnClusterLRPToJoinIfAddrs: oc.ovnClusterLRPToJoinIfAddrs, + }, nil } func (oc *DefaultNetworkController) addNode(node *corev1.Node) ([]*net.IPNet, error) { @@ -470,6 +492,16 @@ type nodeSyncs struct { syncReroute bool } +func nodeNeedsSync(syncs *nodeSyncs) bool { + return syncs.syncNode || + syncs.syncClusterRouterPort || + syncs.syncMgmtPort || + syncs.syncGw || + syncs.syncHo || + syncs.syncZoneIC || + syncs.syncReroute +} + func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, nSyncs *nodeSyncs) error { var hostSubnets 
[]*net.IPNet var errs []error @@ -492,7 +524,11 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n return nil } - klog.Infof("Adding or Updating Node %q", node.Name) + if !nodeNeedsSync(nSyncs) { + return nil + } + + klog.Infof("Adding or Updating local node %q for network %q", node.Name, oc.GetNetworkName()) if nSyncs.syncNode { if hostSubnets, err = oc.addNode(node); err != nil { oc.addNodeFailed.Store(node.Name, true) @@ -509,12 +545,10 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n } // since the nodeSync objects are created knowing if hybridOverlay is enabled this should work - if nSyncs.syncHo { + if nSyncs.syncHo && config.HybridOverlay.Enabled { if err = oc.allocateHybridOverlayDRIP(node); err != nil { errs = append(errs, err) oc.hybridOverlayFailed.Store(node.Name, true) - } else { - oc.hybridOverlayFailed.Delete(node.Name) } } @@ -551,30 +585,40 @@ func (oc *DefaultNetworkController) addUpdateLocalNodeEvent(node *corev1.Node, n } } - annotator := kube.NewNodeAnnotator(oc.kube, node.Name) - if config.HybridOverlay.Enabled { - if err := oc.handleHybridOverlayPort(node, annotator); err != nil { - errs = append(errs, fmt.Errorf("failed to set up hybrid overlay logical switch port for %s: %v", node.Name, err)) - } - } else { - // the node needs to cleanup Hybrid overlay annotations LogicalRouterPolicies and Hybrid overlay port - // if it has them and hybrid overlay is not enabled - if err := oc.deleteHybridOverlayPort(node); err != nil { - errs = append(errs, err) - } - if _, exist := node.Annotations[hotypes.HybridOverlayDRMAC]; exist { - annotator.Delete(hotypes.HybridOverlayDRMAC) + if nSyncs.syncHo { + annotator := kube.NewNodeAnnotator(oc.kube, node.Name) + if config.HybridOverlay.Enabled { + if err := oc.handleHybridOverlayPort(node, annotator); err != nil { + errs = append(errs, fmt.Errorf("failed to set up hybrid overlay logical switch port for %s: %v", node.Name, err)) + 
oc.hybridOverlayFailed.Store(node.Name, true) + } else { + oc.hybridOverlayFailed.Delete(node.Name) + } + } else { + // pedantic - node should never be stored in hybridOverlayFailed if HO is not enabled + oc.hybridOverlayFailed.Delete(node.Name) + + // the node needs to cleanup Hybrid overlay annotations LogicalRouterPolicies and Hybrid overlay port + // if it has them and hybrid overlay is not enabled + if err := oc.deleteHybridOverlayPort(node); err != nil { + errs = append(errs, err) + } else { + // only clear annotations if tear down was successful + if _, exist := node.Annotations[hotypes.HybridOverlayDRMAC]; exist { + annotator.Delete(hotypes.HybridOverlayDRMAC) + } + if _, exist := node.Annotations[hotypes.HybridOverlayDRIP]; exist { + annotator.Delete(hotypes.HybridOverlayDRIP) + } + } } - if _, exist := node.Annotations[hotypes.HybridOverlayDRIP]; exist { - annotator.Delete(hotypes.HybridOverlayDRIP) + if err := annotator.Run(); err != nil { + errs = append(errs, fmt.Errorf("failed to set hybrid overlay annotations for node %s: %v", node.Name, err)) } } - if err := annotator.Run(); err != nil { - errs = append(errs, fmt.Errorf("failed to set hybrid overlay annotations for node %s: %v", node.Name, err)) - } if nSyncs.syncGw { - err := oc.syncNodeGateway(node, nil) + err := oc.syncNodeGateway(node) if err != nil { errs = append(errs, err) oc.gatewaysFailed.Store(node.Name, true) @@ -653,8 +697,8 @@ func (oc *DefaultNetworkController) addUpdateRemoteNodeEvent(node *corev1.Node, } else { oc.syncZoneICFailed.Delete(node.Name) } + klog.V(5).Infof("Creating Interconnect resources for remote node %q on network %q took: %s", node.Name, oc.GetNetworkName(), time.Since(start)) } - klog.V(5).Infof("Creating Interconnect resources for node %v took: %s", node.Name, time.Since(start)) return err } @@ -730,7 +774,7 @@ func (oc *DefaultNetworkController) addUpdateHoNodeEvent(node *corev1.Node) erro return err } - nodes, err := oc.kube.GetNodes() + nodes, err := 
oc.watchFactory.GetNodes() if err != nil { return err } diff --git a/go-controller/pkg/ovn/master_test.go b/go-controller/pkg/ovn/master_test.go index 8d4b57dda7..8d46c281d3 100644 --- a/go-controller/pkg/ovn/master_test.go +++ b/go-controller/pkg/ovn/master_test.go @@ -23,7 +23,7 @@ import ( clienttesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" egressfirewallfake "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/clientset/versioned/fake" @@ -336,7 +336,7 @@ func addNodeLogicalFlowsHelper(testData []libovsdbtest.TestData, expectedOVNClus Networks: []string{node.NodeGWIP}, GatewayChassis: []string{chassisName + "-UUID"}, Options: map[string]string{ - "gateway_mtu": "1400", + libovsdbops.GatewayMTU: "1400", }, }) if serviceControllerEnabled { @@ -356,8 +356,8 @@ func addNodeLogicalFlowsHelper(testData []libovsdbtest.TestData, expectedOVNClus UUID: types.SwitchToRouterPrefix + node.Name + "-UUID", Type: "router", Options: map[string]string{ - "router-port": types.RouterToSwitchPrefix + node.Name, - "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), + libovsdbops.RouterPort: types.RouterToSwitchPrefix + node.Name, + "arp_proxy": kubevirt.ComposeARPProxyLSPOption(), }, Addresses: []string{"router"}, }) @@ -383,14 +383,19 @@ func addNodeLogicalFlowsHelper(testData []libovsdbtest.TestData, expectedOVNClus Nexthops: []string{node.NodeMgmtPortIP}, Priority: intPriority, }) - testData = append(testData, &nbdb.LogicalRouterPolicy{ - UUID: "policy-based-route-2-UUID", - Action: nbdb.LogicalRouterPolicyActionReroute, - Match: matchStr2, - Nexthops: []string{node.NodeMgmtPortIP}, - Priority: intPriority, - }) - expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, []string{"policy-based-route-1-UUID", "policy-based-route-2-UUID"}...) 
+ expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, "policy-based-route-1-UUID") + + if config.Gateway.Mode == config.GatewayModeShared { + testData = append(testData, &nbdb.LogicalRouterPolicy{ + UUID: "policy-based-route-2-UUID", + Action: nbdb.LogicalRouterPolicyActionReroute, + Match: matchStr2, + Nexthops: []string{node.NodeMgmtPortIP}, + Priority: intPriority, + }) + expectedOVNClusterRouter.Policies = append(expectedOVNClusterRouter.Policies, "policy-based-route-2-UUID") + + } testData = append(testData, expectedClusterPortGroup) testData = append(testData, expectedClusterRouterPortGroup) return testData @@ -958,6 +963,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { // Restore global default values before each testcase gomega.Expect(config.PrepareTestConfig()).To(gomega.Succeed()) fakeOvn = NewFakeOVN(true) + config.OVNKubernetesFeature.EnableEgressIP = true app = cli.NewApp() app.Name = "test" @@ -1038,6 +1044,19 @@ var _ = ginkgo.Describe("Default network controller operations", func() { l3GatewayConfig = node1.gatewayConfig(config.GatewayModeLocal, uint(vlanID)) err = util.SetL3GatewayConfig(nodeAnnotator, l3GatewayConfig) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if config.OVNKubernetesFeature.EnableEgressIP { + physicalIPs := []string{} + for _, ip := range l3GatewayConfig.IPAddresses { + physicalIPs = append(physicalIPs, ip.IP.String()) + } + egressNodeIPsASv4, egressNodeIPsASv6 := buildEgressIPNodeAddressSets(physicalIPs) + if config.IPv4Mode { + dbSetup.NBData = append(dbSetup.NBData, egressNodeIPsASv4) + } + if config.IPv6Mode { + dbSetup.NBData = append(dbSetup.NBData, egressNodeIPsASv6) + } + } err = util.UpdateNodeManagementPortMACAddresses(&testNode, nodeAnnotator, ovntest.MustParseMAC(node1.NodeMgmtPortMAC), types.DefaultNetworkName) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1093,9 +1112,6 @@ var _ = ginkgo.Describe("Default network controller operations", func() { }() 
oc.SCTPSupport = true - - expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, - expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) }) ginkgo.AfterEach(func() { @@ -1113,7 +1129,11 @@ var _ = ginkgo.Describe("Default network controller operations", func() { clusterSubnets := startFakeController(oc, wg) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) retry.InitRetryObjWithAdd(testNode, testNode.Name, oc.retryNodes) gomega.Expect(retry.RetryObjsLen(oc.retryNodes)).To(gomega.Equal(1)) @@ -1129,6 +1149,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(retry.CheckRetryObj(testNode.Name, oc.retryNodes)).To(gomega.BeFalse()) skipSnat := false + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1167,15 +1189,17 @@ var _ = ginkgo.Describe("Default network controller operations", func() { types.OVNClusterRouter, badRoute, p) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Syncing node with OVNK") - node, err := oc.kube.GetNode(testNode.Name) + node, err := 
oc.kube.GetNodeForWindows(testNode.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = oc.syncNodeManagementPortDefault(node, node.Name, []*net.IPNet{subnet}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = oc.syncDefaultGatewayLogicalNetwork(node, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + err = oc.syncNodeGateway(node) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("Stale route should have been removed") skipSnat := false + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1202,10 +1226,15 @@ var _ = ginkgo.Describe("Default network controller operations", func() { clusterSubnets := startFakeController(oc, wg) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) skipSnat := false + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, 
l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1230,10 +1259,17 @@ var _ = ginkgo.Describe("Default network controller operations", func() { newNodeSNAT("stale-nodeNAT-UUID-3", "10.0.0.3", Node1GatewayRouterIP), newNodeSNAT("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3"), } + extraNatsWithMatch := []*nbdb.NAT{ // used for pod network advertised test + newNodeSNATWithMatch("stale-nodeNAT-UUID-1", "10.1.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + newNodeSNATWithMatch("stale-nodeNAT-UUID-2", "10.2.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + newNodeSNATWithMatch("stale-nodeNAT-UUID-3", "10.0.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + newNodeSNATWithMatch("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3", "ip4.dst == $a712973235162149816"), + } ginkgo.DescribeTable( "reconciles pod network SNATs from syncGateway", func(condition func(*DefaultNetworkController) error, expectedExtraNATs ...*nbdb.NAT) { app.Action = func(ctx *cli.Context) error { + // Initialize config from CLI flags (including --init-gateways) _, err := config.InitConfig(ctx, nil, nil) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -1243,6 +1279,10 @@ var _ = ginkgo.Describe("Default network controller operations", func() { _, err = fakeClient.KubeClient.CoreV1().Pods(ns.Name).Create(context.TODO(), &pod, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // generate specific test conditions (after base config is set) + err = condition(oc) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // Let the real code run and ensure OVN database sync gomega.Expect(oc.WatchNodes()).To(gomega.Succeed()) @@ -1250,30 +1290,39 @@ var _ = ginkgo.Describe("Default network controller operations", func() { GR := &nbdb.LogicalRouter{ Name: types.GWRouterPrefix + node1.Name, } - err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNats...) 
- gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // generate specific test conditions - err = condition(oc) + if !oc.isPodNetworkAdvertisedAtNode(node1.Name) { + err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNats...) + } else { + err = libovsdbops.CreateOrUpdateNATs(nbClient, GR, extraNatsWithMatch...) + } gomega.Expect(err).NotTo(gomega.HaveOccurred()) // ensure the stale SNAT's are cleaned up gomega.Expect(oc.StartServiceController(wg, false)).To(gomega.Succeed()) subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, []string{node1.NodeIP}) + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - skipSnat := config.Gateway.DisableSNATMultipleGWs || oc.isPodNetworkAdvertisedAtNode(node1.Name) + skipSnat := config.Gateway.DisableSNATMultipleGWs && !oc.GetNetInfo().IsPrimaryNetwork() var clusterSubnets []*net.IPNet for _, clusterSubnet := range config.Default.ClusterSubnets { clusterSubnets = append(clusterSubnets, clusterSubnet.CIDR) } - expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, - expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, - []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, - skipSnat, node1.NodeMgmtPortIP, "1400") - - if oc.isPodNetworkAdvertisedAtNode(node1.Name) { + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) + if !oc.isPodNetworkAdvertisedAtNode(node1.Name) { + expectedNBDatabaseState = 
generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, + expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, + []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, + skipSnat, node1.NodeMgmtPortIP, "1400") + } else { + expectedNBDatabaseState = generateGatewayInitExpectedNBWithPodNetworkAdvertised(expectedNBDatabaseState, expectedOVNClusterRouter, + expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, + []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, + skipSnat, node1.NodeMgmtPortIP, "1400", true) addrSet, err := oc.addressSetFactory.GetAddressSet(GetAdvertisedNetworkSubnetsAddressSetDBIDs()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) expectedNBDatabaseState = generateAdvertisedUDNIsolationExpectedNB(expectedNBDatabaseState, oc.GetNetworkName(), oc.GetNetworkID(), clusterSubnets, expectedNodeSwitch, addrSet) @@ -1327,17 +1376,21 @@ var _ = ginkgo.Describe("Default network controller operations", func() { mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{"node1": {"vrf"}}) return oc.Reconcile(mutableNetInfo) }, - newNodeSNAT("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3"), // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to this node + // won't be deleted on this node since this pod belongs to node-1 and is advertised so we keep this SNAT + newNodeSNATWithMatch("stale-nodeNAT-UUID-3", "10.0.0.3", Node1GatewayRouterIP, "ip4.dst == $a712973235162149816"), + // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to node-1 + newNodeSNATWithMatch("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3", "ip4.dst == $a712973235162149816"), ), ginkgo.Entry( "When pod network is advertised and DisableSNATMultipleGWs is false", func(oc *DefaultNetworkController) error { 
config.Gateway.DisableSNATMultipleGWs = false + config.OVNKubernetesFeature.EnableEgressIP = true mutableNetInfo := util.NewMutableNetInfo(oc.GetNetInfo()) mutableNetInfo.SetPodNetworkAdvertisedVRFs(map[string][]string{"node1": {"vrf"}}) return oc.Reconcile(mutableNetInfo) }, - newNodeSNAT("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3"), // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to this node + newNodeSNATWithMatch("stale-nodeNAT-UUID-4", "10.0.0.3", "172.16.16.3", "ip4.dst == $a712973235162149816"), // won't be deleted on this node but will be deleted on the node whose IP is 172.16.16.3 since this pod belongs to this node ), ) @@ -1349,6 +1402,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { skipSnat := false subnet := ovntest.MustParseIPNet(node1.NodeSubnet) + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, []*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1394,6 +1449,8 @@ var _ = ginkgo.Describe("Default network controller operations", func() { skipSnat := false subnet := ovntest.MustParseIPNet(node1.NodeSubnet) + expectedNBDatabaseState = addNodeLogicalFlowsWithServiceController(nil, expectedOVNClusterRouter, expectedNodeSwitch, + expectedClusterRouterPortGroup, expectedClusterPortGroup, &node1, oc.svcTemplateSupport) expectedNBDatabaseState = generateGatewayInitExpectedNB(expectedNBDatabaseState, expectedOVNClusterRouter, expectedNodeSwitch, node1.Name, clusterSubnets, []*net.IPNet{subnet}, l3GatewayConfig, []*net.IPNet{classBIPAddress(node1.LrpIP)}, 
[]*net.IPNet{classBIPAddress(node1.DrLrpIP)}, @@ -1459,13 +1516,10 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) startFakeController(oc, wg) - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - nodeHostAddrs := []string{} - for _, nodeHostCIDR := range nodeHostCIDRs.UnsortedList() { - ip, _, _ := net.ParseCIDR(nodeHostCIDR) - nodeHostAddrs = append(nodeHostAddrs, ip.String()) - } - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, nodeHostAddrs) + // Get node with the latest annotations set + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // inject transient problem, nbdb is down @@ -1558,13 +1612,9 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) startFakeController(oc, wg) - subnet := ovntest.MustParseIPNet(node1.NodeSubnet) - nodeHostAddrs := []string{} - for _, nodeHostCIDR := range nodeHostCIDRs.UnsortedList() { - ip, _, _ := net.ParseCIDR(nodeHostCIDR) - nodeHostAddrs = append(nodeHostAddrs, ip.String()) - } - err = oc.syncDefaultGatewayLogicalNetwork(&testNode, l3GatewayConfig, []*net.IPNet{subnet}, nodeHostAddrs) + testNode, err := fakeClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), testNode.Name, metav1.GetOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = oc.syncNodeGateway(testNode) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Delete the node's gateway Logical Router Port to force node delete to handle a @@ -1639,7 +1689,7 @@ var _ = ginkgo.Describe("Default network controller operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("adding the node becomes possible") - 
gomega.Expect(oc.retryNodes.ResourceHandler.AddResource(&testNode, false)).To(gomega.Succeed()) + gomega.Eventually(oc.retryNodes.ResourceHandler.AddResource).WithArguments(&testNode, false).Should(gomega.Succeed()) return nil } @@ -1945,6 +1995,12 @@ func newNodeSNAT(uuid, logicalIP, externalIP string) *nbdb.NAT { } } +func newNodeSNATWithMatch(uuid, logicalIP, externalIP, match string) *nbdb.NAT { + nat := newNodeSNAT(uuid, logicalIP, externalIP) + nat.Match = match + return nat +} + func TestController_syncNodes(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) diff --git a/go-controller/pkg/ovn/multihoming_test.go b/go-controller/pkg/ovn/multihoming_test.go index ab3d12425a..bfcdcd1a75 100644 --- a/go-controller/pkg/ovn/multihoming_test.go +++ b/go-controller/pkg/ovn/multihoming_test.go @@ -11,11 +11,12 @@ import ( corev1 "k8s.io/api/core/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" @@ -165,7 +166,7 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit delete(lsp.Options, "iface-id-ver") } if ocInfo.bnc.isLayer2Interconnect() { - lsp.Options["requested-tnl-key"] = "1" // hardcode this for now. + lsp.Options[libovsdbops.RequestedTnlKey] = "1" // hardcode this for now. 
} data = append(data, lsp) @@ -216,12 +217,12 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit "k8s.ovn.org/topology": ocInfo.bnc.TopologyType(), "k8s.ovn.org/network": ocInfo.bnc.GetNetworkName(), }, - Options: map[string]string{"router-port": ovntypes.RouterToSwitchPrefix + switchName}, + Options: map[string]string{libovsdbops.RouterPort: ovntypes.RouterToSwitchPrefix + switchName}, Type: "router", } data = append(data, lsp) if util.IsNetworkSegmentationSupportEnabled() && ocInfo.bnc.IsPrimaryNetwork() { - lsp.Options["requested-tnl-key"] = "25" + lsp.Options[libovsdbops.RequestedTnlKey] = "25" } nodeslsps[switchName] = append(nodeslsps[switchName], networkSwitchToGWRouterLSPUUID) @@ -291,11 +292,11 @@ func (em *secondaryNetworkExpectationMachine) expectedLogicalSwitchesAndPortsWit UUID: transitSwitchName + "-UUID", Name: transitSwitchName, OtherConfig: map[string]string{ - "mcast_querier": "false", - "mcast_flood_unregistered": "true", - "interconn-ts": transitSwitchName, - "requested-tnl-key": "16711685", - "mcast_snoop": "true", + "mcast_querier": "false", + "mcast_flood_unregistered": "true", + "interconn-ts": transitSwitchName, + libovsdbops.RequestedTnlKey: "16711685", + "mcast_snoop": "true", }, ExternalIDs: extIDs, }) @@ -332,8 +333,8 @@ func newExpectedSwitchPort(lspUUID string, portName string, podAddr string, pod ovntypes.TopologyExternalID: netInfo.TopologyType(), }, Options: map[string]string{ - "requested-chassis": pod.nodeName, - "iface-id-ver": pod.podName, + libovsdbops.RequestedChassis: pod.nodeName, + "iface-id-ver": pod.podName, }, PortSecurity: []string{podAddr}, } @@ -343,7 +344,7 @@ func newExpectedSwitchToRouterPort(lspUUID string, portName string, pod testPod, lrp := newExpectedSwitchPort(lspUUID, portName, "router", pod, netInfo, nad) lrp.ExternalIDs = nil lrp.Options = map[string]string{ - "router-port": "rtos-isolatednet_test-node", + libovsdbops.RouterPort: "rtos-isolatednet_test-node", } 
lrp.PortSecurity = nil lrp.Type = "router" diff --git a/go-controller/pkg/ovn/multipolicy_test.go b/go-controller/pkg/ovn/multipolicy_test.go index bb132d215a..095b35772f 100644 --- a/go-controller/pkg/ovn/multipolicy_test.go +++ b/go-controller/pkg/ovn/multipolicy_test.go @@ -20,6 +20,7 @@ import ( ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" ovntest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" @@ -150,8 +151,8 @@ func getExpectedDataPodsAndSwitchesForSecondaryNetwork(fakeOvn *FakeOVN, pods [] ovntypes.TopologyExternalID: ocInfo.bnc.TopologyType(), }, Options: map[string]string{ - "requested-chassis": pod.nodeName, - "iface-id-ver": pod.podName, + libovsdbops.RequestedChassis: pod.nodeName, + "iface-id-ver": pod.podName, }, PortSecurity: []string{podAddr}, diff --git a/go-controller/pkg/ovn/namespace.go b/go-controller/pkg/ovn/namespace.go index 127c034735..07282de4df 100644 --- a/go-controller/pkg/ovn/namespace.go +++ b/go-controller/pkg/ovn/namespace.go @@ -8,10 +8,12 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" utilerrors "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util/errors" @@ -234,9 +236,41 @@ func (oc *DefaultNetworkController) updateNamespace(old, newer *corev1.Namespace if err != nil { errors = append(errors, err) } 
else { - if extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName); err != nil { - errors = append(errors, err) - } else if err = addOrUpdatePodSNAT(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs); err != nil { + // Helper function to handle the complex SNAT operations + handleSNATOps := func() error { + extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName) + if err != nil { + return err + } + + var ops []ovsdb.Operation + // Handle each pod IP individually since each IP family needs its own SNAT match + for _, podIP := range podAnnotation.IPs { + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(podIP) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(oc.nbClient, oc.GetNetInfo(), pod.Spec.NodeName, oc.isPodNetworkAdvertisedAtNode(pod.Spec.NodeName), ipFamily) + if err != nil { + return fmt.Errorf("failed to get SNAT match for node %s for network %s: %v", pod.Spec.NodeName, oc.GetNetworkName(), err) + } + ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, []*net.IPNet{podIP}, snatMatch, ops) + if err != nil { + return err + } + } + + // Execute all operations in a single transaction + if len(ops) > 0 { + _, err = libovsdbops.TransactAndCheck(oc.nbClient, ops) + if err != nil { + return fmt.Errorf("failed to update SNAT for pod %s on router %s: %v", pod.Name, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), err) + } + } + return nil + } + + if err := handleSNATOps(); err != nil { errors = append(errors, err) } } diff --git a/go-controller/pkg/ovn/namespace_test.go b/go-controller/pkg/ovn/namespace_test.go index c067098709..3e8c556b8a 100644 --- a/go-controller/pkg/ovn/namespace_test.go +++ b/go-controller/pkg/ovn/namespace_test.go @@ -238,6 +238,7 @@ var _ = ginkgo.Describe("OVN Namespace Operations", func() { ginkgo.It("creates an address set for existing nodes when the host network traffic 
namespace is created", func() { config.Gateway.Mode = config.GatewayModeShared config.Gateway.NodeportEnable = true + config.Gateway.EphemeralPortRange = config.DefaultEphemeralPortRange var err error config.Default.ClusterSubnets, err = config.ParseClusterSubnetEntries(clusterCIDR) gomega.Expect(err).NotTo(gomega.HaveOccurred()) diff --git a/go-controller/pkg/ovn/network_segmentation_test.go b/go-controller/pkg/ovn/network_segmentation_test.go index f52ad64c2f..cfcc0f7e83 100644 --- a/go-controller/pkg/ovn/network_segmentation_test.go +++ b/go-controller/pkg/ovn/network_segmentation_test.go @@ -10,6 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -84,7 +85,7 @@ var _ = ginkgo.Describe("OVN Pod Operations with network segmentation", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t1.nodeName, + libovsdbops.RequestedChassis: t1.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, diff --git a/go-controller/pkg/ovn/ovn.go b/go-controller/pkg/ovn/ovn.go index 7a1aad8ed7..07b7b6a83b 100644 --- a/go-controller/pkg/ovn/ovn.go +++ b/go-controller/pkg/ovn/ovn.go @@ -16,8 +16,9 @@ import ( listers "k8s.io/client-go/listers/core/v1" ref "k8s.io/client-go/tools/reference" "k8s.io/klog/v2" + v1pod "k8s.io/kubernetes/pkg/api/v1/pod" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kubevirt" @@ -117,6 +118,10 @@ 
func networkStatusAnnotationsChanged(oldPod, newPod *corev1.Pod) bool { return oldPod.Annotations[nettypes.NetworkStatusAnnot] != newPod.Annotations[nettypes.NetworkStatusAnnot] } +func podBecameReady(oldPod, newPod *corev1.Pod) bool { + return !v1pod.IsPodReadyConditionTrue(oldPod.Status) && v1pod.IsPodReadyConditionTrue(newPod.Status) +} + // ensurePod tries to set up a pod. It returns nil on success and error on failure; failure // indicates the pod set up should be retried later. func (oc *DefaultNetworkController) ensurePod(oldPod, pod *corev1.Pod, addPort bool) error { @@ -131,6 +136,14 @@ func (oc *DefaultNetworkController) ensurePod(oldPod, pod *corev1.Pod, addPort b return oc.ensureRemotePodIP(oldPod, pod, addPort) } + // If an external gateway pod is in terminating or not ready state then remove the + // routes for the external gateway pod + if util.PodTerminating(pod) || !v1pod.IsPodReadyConditionTrue(pod.Status) { + if err := oc.deletePodExternalGW(pod); err != nil { + return fmt.Errorf("ensurePod failed %s/%s: %w", pod.Namespace, pod.Name, err) + } + } + if oc.isPodScheduledinLocalZone(pod) { klog.V(5).Infof("Ensuring zone local for Pod %s/%s in node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) return oc.ensureLocalZonePod(oldPod, pod, addPort) @@ -170,7 +183,7 @@ func (oc *DefaultNetworkController) ensureLocalZonePod(oldPod, pod *corev1.Pod, } } else { // either pod is host-networked or its an update for a normal pod (addPort=false case) - if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) { + if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) || podBecameReady(oldPod, pod) { if err := oc.addPodExternalGW(pod); err != nil { return fmt.Errorf("addPodExternalGW failed for %s/%s: %w", pod.Namespace, pod.Name, err) } @@ -237,7 +250,7 @@ func (oc *DefaultNetworkController) ensureRemoteZonePod(oldPod, pod *corev1.Pod, } // either pod is 
host-networked or its an update for a normal pod (addPort=false case) - if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) { + if oldPod == nil || exGatewayAnnotationsChanged(oldPod, pod) || networkStatusAnnotationsChanged(oldPod, pod) || podBecameReady(oldPod, pod) { // check if this remote pod is serving as an external GW. If so add the routes in the namespace // associated with this remote pod if err := oc.addPodExternalGW(pod); err != nil { @@ -373,34 +386,17 @@ func (oc *DefaultNetworkController) WatchEgressIPPods() error { } // syncNodeGateway ensures a node's gateway router is configured -func (oc *DefaultNetworkController) syncNodeGateway(node *corev1.Node, hostSubnets []*net.IPNet) error { - l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) +func (oc *DefaultNetworkController) syncNodeGateway(node *corev1.Node) error { + gwConfig, err := oc.nodeGatewayConfig(node) if err != nil { - return err + return fmt.Errorf("error getting gateway config for node %s: %v", node.Name, err) } - if hostSubnets == nil { - hostSubnets, err = util.ParseNodeHostSubnetAnnotation(node, ovntypes.DefaultNetworkName) - if err != nil { - return err - } - } - - if l3GatewayConfig.Mode == config.GatewayModeDisabled { - if err := oc.newGatewayManager(node.Name).Cleanup(); err != nil { - return fmt.Errorf("error cleaning up gateway for node %s: %v", node.Name, err) - } - } else if hostSubnets != nil { - var hostAddrs []string - if config.Gateway.Mode == config.GatewayModeShared { - hostAddrs, err = util.GetNodeHostAddrs(node) - if err != nil && !util.IsAnnotationNotSetError(err) { - return fmt.Errorf("failed to get host CIDRs for node: %s: %v", node.Name, err) - } - } - if err := oc.syncDefaultGatewayLogicalNetwork(node, l3GatewayConfig, hostSubnets, hostAddrs); err != nil { - return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) - } + if err := oc.newGatewayManager(node.Name).SyncGateway( + 
node, + gwConfig, + ); err != nil { + return fmt.Errorf("error creating gateway for node %s: %v", node.Name, err) } if util.IsPodNetworkAdvertisedAtNode(oc, node.Name) { @@ -497,7 +493,7 @@ func (oc *DefaultNetworkController) InitEgressServiceZoneController() (*egresssv return nil } // used only when IC=true - createDefaultNodeRouteToExternal := func(_ libovsdbclient.Client, _, _ string, _ []config.CIDRNetworkEntry) error { + createDefaultNodeRouteToExternal := func(_ libovsdbclient.Client, _, _ string, _ []config.CIDRNetworkEntry, _ []*net.IPNet) error { return nil } diff --git a/go-controller/pkg/ovn/ovn_test.go b/go-controller/pkg/ovn/ovn_test.go index 0f2a9f1058..0a1b9e3c8f 100644 --- a/go-controller/pkg/ovn/ovn_test.go +++ b/go-controller/pkg/ovn/ovn_test.go @@ -25,7 +25,7 @@ import ( anpapi "sigs.k8s.io/network-policy-api/apis/v1alpha1" anpfake "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned/fake" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -276,7 +276,7 @@ func (o *FakeOVN) init(nadList []nettypes.NetworkAttachmentDefinition) { err = o.eIPController.SyncLocalNodeZonesCache() gomega.Expect(err).ShouldNot(gomega.HaveOccurred(), "syncing Nodes OVN zones status must succeed to support EgressIP") - existingNodes, err := o.controller.kube.GetNodes() + existingNodes, err := o.controller.watchFactory.GetNodes() if err == nil { for _, node := range existingNodes { o.controller.localZoneNodes.Store(node.Name, true) diff --git a/go-controller/pkg/ovn/pods.go b/go-controller/pkg/ovn/pods.go index 949d48da55..0ad9442e3e 100644 --- a/go-controller/pkg/ovn/pods.go +++ b/go-controller/pkg/ovn/pods.go @@ -12,8 +12,9 @@ import ( corev1 "k8s.io/api/core/v1" ktypes "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" - 
"github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" hotypes "github.com/ovn-org/ovn-kubernetes/go-controller/hybrid-overlay/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -310,13 +311,26 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *corev1.Pod) (err error) if err != nil { return err } - } else if config.Gateway.DisableSNATMultipleGWs && !oc.isPodNetworkAdvertisedAtNode(pod.Spec.NodeName) { + } else if config.Gateway.DisableSNATMultipleGWs { // Add NAT rules to pods if disable SNAT is set and does not have // namespace annotations to go through external egress router if extIPs, err := getExternalIPsGR(oc.watchFactory, pod.Spec.NodeName); err != nil { return err - } else if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, podAnnotation.IPs, "", ops); err != nil { - return err + } else { + // Handle each pod IP individually since each IP family needs its own SNAT match + for _, podIP := range podAnnotation.IPs { + ipFamily := utilnet.IPv4 + if utilnet.IsIPv6CIDR(podIP) { + ipFamily = utilnet.IPv6 + } + snatMatch, err := GetNetworkScopedClusterSubnetSNATMatch(oc.nbClient, oc.GetNetInfo(), pod.Spec.NodeName, oc.isPodNetworkAdvertisedAtNode(pod.Spec.NodeName), ipFamily) + if err != nil { + return fmt.Errorf("failed to get SNAT match for node %s for network %s: %v", pod.Spec.NodeName, oc.GetNetworkName(), err) + } + if ops, err = addOrUpdatePodSNATOps(oc.nbClient, oc.GetNetworkScopedGWRouterName(pod.Spec.NodeName), extIPs, []*net.IPNet{podIP}, snatMatch, ops); err != nil { + return err + } + } } } diff --git a/go-controller/pkg/ovn/pods_test.go b/go-controller/pkg/ovn/pods_test.go index 6045183157..590d34bf3a 100644 --- a/go-controller/pkg/ovn/pods_test.go +++ b/go-controller/pkg/ovn/pods_test.go @@ -124,6 +124,12 @@ func newPod(namespace, name, node, podIP string) *corev1.Pod { Phase: corev1.PodRunning, PodIP: podIP, PodIPs: podIPs, + Conditions: 
[]corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, }, } } @@ -470,8 +476,8 @@ func getExpectedDataPodsSwitchesPortGroup(netInfo util.NetInfo, pods []testPod, "namespace": pod.namespace, }, Options: map[string]string{ - "requested-chassis": pod.nodeName, - "iface-id-ver": pod.podName, + libovsdbops.RequestedChassis: pod.nodeName, + "iface-id-ver": pod.podName, }, PortSecurity: []string{podAddr}, } @@ -2018,7 +2024,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t2.nodeName, + libovsdbops.RequestedChassis: t2.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, @@ -2033,7 +2039,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { "namespace": t2.namespace, }, Options: map[string]string{ - "requested-chassis": t2.nodeName, + libovsdbops.RequestedChassis: t2.nodeName, //"iface-id-ver": is empty to check that it won't be set on update }, PortSecurity: []string{fmt.Sprintf("%s %s", t2.podMAC, t2.podIP)}, @@ -2048,7 +2054,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t3.nodeName, + libovsdbops.RequestedChassis: t3.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, @@ -2218,7 +2224,7 @@ var _ = ginkgo.Describe("OVN Pod Operations", func() { }, Options: map[string]string{ // check requested-chassis will be updated to correct t1.nodeName value - "requested-chassis": t1.nodeName, + libovsdbops.RequestedChassis: t1.nodeName, // check old value for iface-id-ver will be updated to pod.UID "iface-id-ver": "wrong_value", }, diff --git a/go-controller/pkg/ovn/policy_test.go b/go-controller/pkg/ovn/policy_test.go index 657f3d074a..bcfb4898a3 100644 --- 
a/go-controller/pkg/ovn/policy_test.go +++ b/go-controller/pkg/ovn/policy_test.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/routeimport/route_import.go b/go-controller/pkg/ovn/routeimport/route_import.go index 94da3d34fe..18c372c276 100644 --- a/go-controller/pkg/ovn/routeimport/route_import.go +++ b/go-controller/pkg/ovn/routeimport/route_import.go @@ -15,8 +15,8 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" controllerutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/controller" nbdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller.go b/go-controller/pkg/ovn/secondary_layer2_network_controller.go index c11ca2a2ae..dacf37d090 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller.go @@ -180,7 +180,8 @@ func (h *secondaryLayer2NetworkControllerEventHandler) UpdateResource(oldObj, ne hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) _, syncRerouteFailed := h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) - shouldSyncReroute := syncRerouteFailed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) + shouldSyncReroute := syncRerouteFailed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) || + joinCIDRChanged(oldNode, newNode, h.oc.GetNetworkName()) nodeSyncsParam = &nodeSyncs{ syncMgmtPort: shouldSyncMgmtPort, syncGw: 
shouldSyncGW, @@ -574,42 +575,40 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 gwManager := oc.gatewayManagerForNode(node.Name) oc.gatewayManagers.Store(node.Name, gwManager) - gwConfig, err := oc.nodeGatewayConfig(node) - if err != nil { - errs = append(errs, err) - oc.gatewaysFailed.Store(node.Name, true) - } else { - if err := gwManager.syncNodeGateway( + err := func() error { + gwConfig, err := oc.nodeGatewayConfig(node) + if err != nil { + return err + } + if err := gwManager.SyncGateway( node, - gwConfig.config, - gwConfig.hostSubnets, - nil, - gwConfig.hostSubnets, - gwConfig.gwLRPJoinIPs, // the joinIP allocated to this node for this controller's network - nil, // no need for ovnClusterLRPToJoinIfAddrs - gwConfig.externalIPs, + gwConfig, ); err != nil { - errs = append(errs, err) - oc.gatewaysFailed.Store(node.Name, true) - } else { - if !util.IsPodNetworkAdvertisedAtNode(oc, node.Name) { - err = oc.addUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName) - if err == nil && util.IsRouteAdvertisementsEnabled() { - err = oc.deleteAdvertisedNetworkIsolation(node.Name) - } - } else { - err = oc.deleteUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName) - if err == nil { - err = oc.addAdvertisedNetworkIsolation(node.Name) + return err + } + isUDNAdvertised := util.IsPodNetworkAdvertisedAtNode(oc, node.Name) + err = oc.addOrUpdateUDNClusterSubnetEgressSNAT(gwConfig.hostSubnets, gwManager.gwRouterName, isUDNAdvertised) + if err != nil { + return err + } + if !isUDNAdvertised { + if util.IsRouteAdvertisementsEnabled() { + if err = oc.deleteAdvertisedNetworkIsolation(node.Name); err != nil { + return err } } - if err != nil { - errs = append(errs, err) - oc.gatewaysFailed.Store(node.Name, true) - } else { - oc.gatewaysFailed.Delete(node.Name) + } else { + if err = oc.addAdvertisedNetworkIsolation(node.Name); err != nil { + return err } } + oc.gatewaysFailed.Delete(node.Name) + return nil + 
}() + + if err != nil { + errs = append(errs, err) + oc.gatewaysFailed.Store(node.Name, true) } } @@ -633,7 +632,7 @@ func (oc *SecondaryLayer2NetworkController) addUpdateLocalNodeEvent(node *corev1 if config.OVNKubernetesFeature.EnableEgressIP && nSyncs.syncReroute { rerouteFailed := false - if err := oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo()); err != nil { + if err := oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo(), node); err != nil { errs = append(errs, fmt.Errorf("failed to ensure EgressIP router policies for network %s: %v", oc.GetNetworkName(), err)) rerouteFailed = true } @@ -724,8 +723,8 @@ func (oc *SecondaryLayer2NetworkController) addPortForRemoteNodeGR(node *corev1. node.Name, oc.GetNetworkName(), err) } logicalSwitchPort.Options = map[string]string{ - "requested-tnl-key": strconv.Itoa(tunnelID), - "requested-chassis": node.Name, + libovsdbops.RequestedTnlKey: strconv.Itoa(tunnelID), + libovsdbops.RequestedChassis: node.Name, } sw := nbdb.LogicalSwitch{Name: oc.GetNetworkScopedSwitchName(types.OVNLayer2Switch)} err = libovsdbops.CreateOrUpdateLogicalSwitchPortsOnSwitch(oc.nbClient, &sw, &logicalSwitchPort) @@ -746,7 +745,8 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e return nil } -// addUDNClusterSubnetEgressSNAT adds the SNAT on each node's GR in L2 networks +// addOrUpdateUDNClusterSubnetEgressSNAT adds or updates the SNAT on each node's GR in L2 networks for each UDN +// Based on the isUDNAdvertised flag, the SNAT matches are slightly different // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 10.128.0.0/14 // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 2010:100:200::/64 // these SNATs are required for pod2Egress traffic in LGW mode and pod2SameNode traffic in SGW mode to function properly on UDNs @@ -756,52 +756,29 @@ func (oc *SecondaryLayer2NetworkController) deleteNodeEvent(node *corev1.Node) e // externalIP = "169.254.0.12"; which is the masqueradeIP for this L2 UDN 
// so all in all we want to condionally SNAT all packets that are coming from pods hosted on this node, // which are leaving via UDN's mpX interface to the UDN's masqueradeIP. -func (oc *SecondaryLayer2NetworkController) addUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, routerName string) error { - outputPort := types.GWRouterToJoinSwitchPrefix + routerName - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) +// If isUDNAdvertised is true, then we want to SNAT all packets that are coming from pods on this network +// leaving towards nodeIPs on the cluster to masqueradeIP. If network is advertise then the SNAT looks like this: +// "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" "169.254.0.36" "93.93.0.0/16" +func (oc *SecondaryLayer2NetworkController) addOrUpdateUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, gwRouterName string, isUDNAdvertised bool) error { + outputPort := types.GWRouterToJoinSwitchPrefix + gwRouterName + nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, isUDNAdvertised) if err != nil { return err } if len(nats) == 0 { return nil // nothing to do } - router := &nbdb.LogicalRouter{ - Name: routerName, + gwRouter := &nbdb.LogicalRouter{ + Name: gwRouterName, } - if err := libovsdbops.CreateOrUpdateNATs(oc.nbClient, router, nats...); err != nil { + if err := libovsdbops.CreateOrUpdateNATs(oc.nbClient, gwRouter, nats...); err != nil { return fmt.Errorf("failed to update SNAT for cluster on router: %q for network %q, error: %w", - routerName, oc.GetNetworkName(), err) + gwRouterName, oc.GetNetworkName(), err) } return nil } -func (oc *SecondaryLayer2NetworkController) deleteUDNClusterSubnetEgressSNAT(localPodSubnets []*net.IPNet, routerName string) error { - outputPort := types.GWRouterToJoinSwitchPrefix + routerName - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) - if err != nil { - return err - } - if len(nats) == 0 { - return nil // nothing to do - } - router := 
&nbdb.LogicalRouter{ - Name: routerName, - } - if err := libovsdbops.DeleteNATs(oc.nbClient, router, nats...); err != nil { - return fmt.Errorf("failed to delete SNAT for cluster on router: %q for network %q, error: %w", - routerName, oc.GetNetworkName(), err) - } - return nil -} - -type SecondaryL2GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - gwLRPJoinIPs []*net.IPNet - externalIPs []net.IP -} - -func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*SecondaryL2GatewayConfig, error) { +func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -841,11 +818,14 @@ func (oc *SecondaryLayer2NetworkController) nodeGatewayConfig(node *corev1.Node) // Overwrite the primary interface ID with the correct, per-network one. 
l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) - return &SecondaryL2GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - gwLRPJoinIPs: gwLRPJoinIPs, - externalIPs: externalIPs, + return &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: hostSubnets, + gwLRPJoinIPs: gwLRPJoinIPs, + hostAddrs: nil, + externalIPs: externalIPs, + ovnClusterLRPToJoinIfAddrs: nil, }, nil } diff --git a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go b/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go index 91fc80bc6e..1079a14198 100644 --- a/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go +++ b/go-controller/pkg/ovn/secondary_layer2_network_controller_test.go @@ -19,6 +19,7 @@ import ( "k8s.io/utils/ptr" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" @@ -605,7 +606,7 @@ func expectedLayer2EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayC } func expectedGWToNetworkSwitchRouterPort(name string, netInfo util.NetInfo, networks ...*net.IPNet) *nbdb.LogicalRouterPort { - options := map[string]string{"gateway_mtu": fmt.Sprintf("%d", 1400)} + options := map[string]string{libovsdbops.GatewayMTU: fmt.Sprintf("%d", 1400)} lrp := expectedLogicalRouterPort(name, netInfo, options, networks...) 
if config.IPv6Mode { diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller.go b/go-controller/pkg/ovn/secondary_layer3_network_controller.go index 65ca015ab7..e9745fe9b2 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -107,7 +107,6 @@ func (h *secondaryLayer3NetworkControllerEventHandler) AddResource(obj interface if !ok { return fmt.Errorf("could not cast %T object to *kapi.Node", obj) } - if h.oc.isLocalZoneNode(node) { var nodeParams *nodeSyncs if fromRetryLoop { @@ -187,7 +186,8 @@ func (h *secondaryLayer3NetworkControllerEventHandler) UpdateResource(oldObj, ne hostCIDRsChanged(oldNode, newNode) || nodeGatewayMTUSupportChanged(oldNode, newNode) _, failed = h.oc.syncEIPNodeRerouteFailed.Load(newNode.Name) - syncReroute := failed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) + syncReroute := failed || util.NodeHostCIDRsAnnotationChanged(oldNode, newNode) || + joinCIDRChanged(oldNode, newNode, h.oc.GetNetworkName()) nodeSyncsParam = &nodeSyncs{ syncNode: nodeSync, syncClusterRouterPort: clusterRtrSync, @@ -684,7 +684,7 @@ func (oc *SecondaryLayer3NetworkController) init() error { } } - // FIXME: When https://github.com/ovn-org/libovsdb/issues/235 is fixed, + // FIXME: When https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed, // use IsTableSupported(nbdb.LoadBalancerGroup). 
if _, _, err := util.RunOVNNbctl("--columns=_uuid", "list", "Load_Balancer_Group"); err != nil { klog.Warningf("Load Balancer Group support enabled, however version of OVN in use does not support Load Balancer Groups.") @@ -704,7 +704,6 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 var hostSubnets []*net.IPNet var errs []error var err error - _, _ = oc.localZoneNodes.LoadOrStore(node.Name, true) if noHostSubnet := util.NoHostSubnet(node); noHostSubnet { @@ -715,7 +714,11 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 return nil } - klog.Infof("Adding or Updating Node %q for network %s", node.Name, oc.GetNetworkName()) + if !nodeNeedsSync(nSyncs) { + return nil + } + + klog.Infof("Adding or Updating local node %q for network %q", node.Name, oc.GetNetworkName()) if nSyncs.syncNode { if hostSubnets, err = oc.addNode(node); err != nil { oc.addNodeFailed.Store(node.Name, true) @@ -774,15 +777,9 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 errs = append(errs, fmt.Errorf("failed to generate node GW configuration: %v", err)) oc.gatewaysFailed.Store(node.Name, true) } else { - if err := gwManager.syncNodeGateway( + if err := gwManager.SyncGateway( node, - gwConfig.config, - gwConfig.hostSubnets, - gwConfig.hostAddrs, - gwConfig.clusterSubnets, - gwConfig.gwLRPJoinIPs, // the joinIP allocated to this node for this controller's network - oc.ovnClusterLRPToJoinIfAddrs, // the .1 of this controller's global joinSubnet - gwConfig.externalIPs, + gwConfig, ); err != nil { errs = append(errs, fmt.Errorf( "failed to sync node GW for network %q: %v", @@ -817,7 +814,7 @@ func (oc *SecondaryLayer3NetworkController) addUpdateLocalNodeEvent(node *corev1 if config.OVNKubernetesFeature.EnableEgressIP && util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() && nSyncs.syncReroute { rerouteFailed := false - if err = 
oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo()); err != nil { + if err = oc.eIPController.ensureRouterPoliciesForNetwork(oc.GetNetInfo(), node); err != nil { errs = append(errs, fmt.Errorf("failed to ensure EgressIP router polices for network %s: %v", oc.GetNetworkName(), err)) rerouteFailed = true } @@ -860,7 +857,8 @@ func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *corev return err } -// addNodeSubnetEgressSNAT adds the SNAT on each node's ovn-cluster-router in L3 networks +// addOrUpdateUDNNodeSubnetEgressSNAT adds or updates the SNAT on each node's ovn-cluster-router in L3 networks for each UDN +// Based on the isUDNAdvertised flag, the SNAT matches are slightly different // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 10.128.0.0/24 // snat eth.dst == d6:cf:fd:2c:a6:44 169.254.0.12 2010:100:200::/64 // these SNATs are required for pod2Egress traffic in LGW mode and pod2SameNode traffic in SGW mode to function properly on UDNs @@ -870,9 +868,12 @@ func (oc *SecondaryLayer3NetworkController) addUpdateRemoteNodeEvent(node *corev // externalIP = "169.254.0.12"; which is the masqueradeIP for this L3 UDN // so all in all we want to condionally SNAT all packets that are coming from pods hosted on this node, // which are leaving via UDN's mpX interface to the UDN's masqueradeIP. -func (oc *SecondaryLayer3NetworkController) addUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node) error { +// If isUDNAdvertised is true, then we want to SNAT all packets that are coming from pods on this network +// leaving towards nodeIPs on the cluster to masqueradeIP. 
If network is advertise then the SNAT looks like this: +// "eth.dst == 0a:58:5d:5d:00:02 && (ip4.dst == $a712973235162149816)" "169.254.0.36" "93.93.0.0/24" +func (oc *SecondaryLayer3NetworkController) addOrUpdateUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node, isUDNAdvertised bool) error { outputPort := types.RouterToSwitchPrefix + oc.GetNetworkScopedName(node.Name) - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) + nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort, isUDNAdvertised) if err != nil { return fmt.Errorf("failed to build UDN masquerade SNATs for network %q on node %q, err: %w", oc.GetNetworkName(), node.Name, err) @@ -890,28 +891,6 @@ func (oc *SecondaryLayer3NetworkController) addUDNNodeSubnetEgressSNAT(localPodS return nil } -// deleteUDNNodeSubnetEgressSNAT deletes SNAT rule from network specific -// ovn_cluster_router depending on whether the network is advertised or not -func (oc *SecondaryLayer3NetworkController) deleteUDNNodeSubnetEgressSNAT(localPodSubnets []*net.IPNet, node *corev1.Node) error { - outputPort := types.RouterToSwitchPrefix + oc.GetNetworkScopedName(node.Name) - nats, err := oc.buildUDNEgressSNAT(localPodSubnets, outputPort) - if err != nil { - return fmt.Errorf("failed to build UDN masquerade SNATs for network %q on node %q, err: %w", - oc.GetNetworkName(), node.Name, err) - } - if len(nats) == 0 { - return nil // nothing to do - } - router := &nbdb.LogicalRouter{ - Name: oc.GetNetworkScopedClusterRouterName(), - } - if err := libovsdbops.DeleteNATs(oc.nbClient, router, nats...); err != nil { - return fmt.Errorf("failed to delete SNAT for node subnet on router: %q for network %q, error: %w", - oc.GetNetworkScopedClusterRouterName(), oc.GetNetworkName(), err) - } - return nil -} - func (oc *SecondaryLayer3NetworkController) addNode(node *corev1.Node) ([]*net.IPNet, error) { // Node subnet for the secondary layer3 network is allocated by cluster manager. 
// Make sure that the node is allocated with the subnet before proceeding @@ -926,19 +905,17 @@ func (oc *SecondaryLayer3NetworkController) addNode(node *corev1.Node) ([]*net.I return nil, err } if util.IsNetworkSegmentationSupportEnabled() && oc.IsPrimaryNetwork() { - if !util.IsPodNetworkAdvertisedAtNode(oc, node.Name) { - if err := oc.addUDNNodeSubnetEgressSNAT(hostSubnets, node); err != nil { - return nil, err - } + isUDNAdvertised := util.IsPodNetworkAdvertisedAtNode(oc, node.Name) + if err := oc.addOrUpdateUDNNodeSubnetEgressSNAT(hostSubnets, node, isUDNAdvertised); err != nil { + return nil, err + } + if !isUDNAdvertised { if util.IsRouteAdvertisementsEnabled() { if err := oc.deleteAdvertisedNetworkIsolation(node.Name); err != nil { return nil, err } } } else { - if err := oc.deleteUDNNodeSubnetEgressSNAT(hostSubnets, node); err != nil { - return nil, err - } if err := oc.addAdvertisedNetworkIsolation(node.Name); err != nil { return nil, err } @@ -1042,16 +1019,7 @@ func (oc *SecondaryLayer3NetworkController) gatherJoinSwitchIPs() error { return nil } -type SecondaryL3GatewayConfig struct { - config *util.L3GatewayConfig - hostSubnets []*net.IPNet - clusterSubnets []*net.IPNet - gwLRPJoinIPs []*net.IPNet - hostAddrs []string - externalIPs []net.IP -} - -func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*SecondaryL3GatewayConfig, error) { +func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) (*GatewayConfig, error) { l3GatewayConfig, err := util.ParseNodeL3GatewayAnnotation(node) if err != nil { return nil, fmt.Errorf("failed to get node %s network %s L3 gateway config: %v", node.Name, oc.GetNetworkName(), err) @@ -1101,13 +1069,14 @@ func (oc *SecondaryLayer3NetworkController) nodeGatewayConfig(node *corev1.Node) // Overwrite the primary interface ID with the correct, per-network one. 
l3GatewayConfig.InterfaceID = oc.GetNetworkScopedExtPortName(l3GatewayConfig.BridgeID, node.Name) - return &SecondaryL3GatewayConfig{ - config: l3GatewayConfig, - hostSubnets: hostSubnets, - clusterSubnets: clusterSubnets, - gwLRPJoinIPs: gwLRPJoinIPs, - hostAddrs: hostAddrs, - externalIPs: externalIPs, + return &GatewayConfig{ + annoConfig: l3GatewayConfig, + hostSubnets: hostSubnets, + clusterSubnets: clusterSubnets, + gwLRPJoinIPs: gwLRPJoinIPs, + hostAddrs: hostAddrs, + externalIPs: externalIPs, + ovnClusterLRPToJoinIfAddrs: oc.ovnClusterLRPToJoinIfAddrs, }, nil } diff --git a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go b/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go index 077a6fd822..163d06dfd9 100644 --- a/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go +++ b/go-controller/pkg/ovn/secondary_layer3_network_controller_test.go @@ -762,7 +762,7 @@ func expectedGatewayChassis(nodeName string, netInfo util.NetInfo, gwConfig util func expectedGRToJoinSwitchLRP(gatewayRouterName string, gwRouterLRPIP *net.IPNet, netInfo util.NetInfo) *nbdb.LogicalRouterPort { lrpName := fmt.Sprintf("%s%s", types.GWRouterToJoinSwitchPrefix, gatewayRouterName) - options := map[string]string{"gateway_mtu": fmt.Sprintf("%d", 1400)} + options := map[string]string{libovsdbops.GatewayMTU: fmt.Sprintf("%d", 1400)} return expectedLogicalRouterPort(lrpName, netInfo, options, gwRouterLRPIP) } @@ -836,7 +836,7 @@ func expectedLayer3EgressEntities(netInfo util.NetInfo, gwConfig util.L3GatewayC Networks: []string{"192.168.1.1/24"}, MAC: "0a:58:c0:a8:01:01", GatewayChassis: []string{gatewayChassisUUID}, - Options: map[string]string{"gateway_mtu": "1400"}, + Options: map[string]string{libovsdbops.GatewayMTU: "1400"}, }, expectedGRStaticRoute(staticRouteUUID1, nodeSubnet.String(), lrsrNextHop, &nbdb.LogicalRouterStaticRoutePolicySrcIP, nil, netInfo), expectedGRStaticRoute(staticRouteUUID2, gwRouterJoinIPAddress().IP.String(), 
gwRouterJoinIPAddress().IP.String(), nil, nil, netInfo), @@ -973,7 +973,7 @@ func externalSwitchRouterPortOptions(gatewayRouterName string) map[string]string return map[string]string{ "nat-addresses": "router", "exclude-lb-vips-from-garp": "true", - "router-port": types.GWRouterToExtSwitchPrefix + gatewayRouterName, + libovsdbops.RouterPort: types.GWRouterToExtSwitchPrefix + gatewayRouterName, } } @@ -992,7 +992,7 @@ func expectedJoinSwitchAndLSPs(netInfo util.NetInfo, nodeName string) []libovsdb Name: types.JoinSwitchToGWRouterPrefix + gwRouterName, Addresses: []string{"router"}, ExternalIDs: standardNonDefaultNetworkExtIDs(netInfo), - Options: map[string]string{"router-port": types.GWRouterToJoinSwitchPrefix + gwRouterName}, + Options: map[string]string{libovsdbops.RouterPort: types.GWRouterToJoinSwitchPrefix + gwRouterName}, Type: "router", }, } diff --git a/go-controller/pkg/ovn/secondary_localnet_network_controller.go b/go-controller/pkg/ovn/secondary_localnet_network_controller.go index 3c6fef1027..4046f819ce 100644 --- a/go-controller/pkg/ovn/secondary_localnet_network_controller.go +++ b/go-controller/pkg/ovn/secondary_localnet_network_controller.go @@ -15,7 +15,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/allocator/pod" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics/recorders" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/networkmanager" addressset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/ovn/address_set" @@ -65,7 +65,7 @@ func (h *secondaryLocalnetNetworkControllerEventHandler) RecordAddEvent(obj inte case factory.MultiNetworkPolicyType: mnp := obj.(*mnpapi.MultiNetworkPolicy) klog.V(5).Infof("Recording add event on multinetwork policy 
%s/%s", mnp.Namespace, mnp.Name) - metrics.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) + recorders.GetConfigDurationRecorder().Start("multinetworkpolicy", mnp.Namespace, mnp.Name) } } diff --git a/go-controller/pkg/ovn/topology/topologyfactory.go b/go-controller/pkg/ovn/topology/topologyfactory.go index 8781612242..b20743a242 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory.go +++ b/go-controller/pkg/ovn/topology/topologyfactory.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" @@ -130,7 +130,7 @@ func (gtf *GatewayTopologyFactory) NewJoinSwitch( Name: drSwitchPort, Type: "router", Options: map[string]string{ - "router-port": drRouterPort, + libovsdbops.RouterPort: drRouterPort, }, Addresses: []string{"router"}, } diff --git a/go-controller/pkg/ovn/topology/topologyfactory_test.go b/go-controller/pkg/ovn/topology/topologyfactory_test.go index 01b113c97e..4d189e030a 100644 --- a/go-controller/pkg/ovn/topology/topologyfactory_test.go +++ b/go-controller/pkg/ovn/topology/topologyfactory_test.go @@ -5,10 +5,11 @@ import ( cnitypes "github.com/containernetworking/cni/pkg/types" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -237,7 +238,7 @@ func expectedLogicalSwitchPort(portName string) 
*nbdb.LogicalSwitchPort { Addresses: []string{"router"}, Name: portName, Options: map[string]string{ - "router-port": "rtoj-mydearrouter", + libovsdbops.RouterPort: "rtoj-mydearrouter", }, ParentName: nil, PortSecurity: nil, diff --git a/go-controller/pkg/ovn/udn_isolation.go b/go-controller/pkg/ovn/udn_isolation.go index 6c44489f9c..0230f665b6 100644 --- a/go-controller/pkg/ovn/udn_isolation.go +++ b/go-controller/pkg/ovn/udn_isolation.go @@ -10,8 +10,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/ovsdb" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" @@ -30,6 +30,8 @@ const ( DenySecondaryACL = "DenySecondary" // OpenPortACLPrefix is used to build per-pod ACLs, pod name should be added to the prefix to build a unique name OpenPortACLPrefix = "OpenPort-" + // the same tier is used for all UDN isolation ACLs + isolationTier = types.PrimaryACLTier ) // setupUDNACLs should be called after the node's management port was configured @@ -63,7 +65,8 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { pgName := libovsdbutil.GetPortGroupName(pgIDs) egressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) - egressDenyACL := libovsdbutil.BuildACL(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportEgress) + egressDenyACL := libovsdbutil.BuildACL(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportEgress, isolationTier) getARPMatch := func(direction libovsdbutil.ACLDirection) string { match := "(" @@ -89,15 +92,18 @@ func (oc *DefaultNetworkController) 
setupUDNACLs(mgmtPortIPs []net.IP) error { egressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLEgress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLEgress), libovsdbutil.ACLEgress) - egressARPACL := libovsdbutil.BuildACL(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) + egressARPACL := libovsdbutil.BuildACL(egressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, + nil, libovsdbutil.LportEgress, isolationTier) ingressDenyIDs := oc.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLIngress) - ingressDenyACL := libovsdbutil.BuildACL(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, nil, libovsdbutil.LportIngress) + ingressDenyACL := libovsdbutil.BuildACL(ingressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportIngress, isolationTier) ingressARPIDs := oc.getUDNACLDbIDs(AllowHostARPACL, libovsdbutil.ACLIngress) match = libovsdbutil.GetACLMatch(pgName, getARPMatch(libovsdbutil.ACLIngress), libovsdbutil.ACLIngress) - ingressARPACL := libovsdbutil.BuildACL(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, nil, libovsdbutil.LportIngress) + ingressARPACL := libovsdbutil.BuildACL(ingressARPIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllow, + nil, libovsdbutil.LportIngress, isolationTier) ingressAllowIDs := oc.getUDNACLDbIDs(AllowHostSecondaryACL, libovsdbutil.ACLIngress) match = "(" @@ -114,7 +120,8 @@ func (oc *DefaultNetworkController) setupUDNACLs(mgmtPortIPs []net.IP) error { } match += ")" match = libovsdbutil.GetACLMatch(pgName, match, libovsdbutil.ACLIngress) - ingressAllowACL := libovsdbutil.BuildACL(ingressAllowIDs, types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + ingressAllowACL := libovsdbutil.BuildACL(ingressAllowIDs, 
types.PrimaryUDNAllowPriority, match, nbdb.ACLActionAllowRelated, + nil, libovsdbutil.LportIngress, isolationTier) ops, err := libovsdbops.CreateOrUpdateACLsOps(oc.nbClient, nil, oc.GetSamplingConfig(), egressDenyACL, egressARPACL, ingressARPACL, ingressDenyACL, ingressAllowACL) if err != nil { @@ -200,11 +207,11 @@ func (oc *DefaultNetworkController) setUDNPodOpenPortsOps(podNamespacedName stri // don't return on parseErr, as we need to cleanup potentially present ACLs from the previous config ingressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLIngress) ingressACL := libovsdbutil.BuildACL(ingressIDs, types.PrimaryUDNAllowPriority, - ingressMatch, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress) + ingressMatch, nbdb.ACLActionAllowRelated, nil, libovsdbutil.LportIngress, isolationTier) egressIDs := oc.getUDNOpenPortDbIDs(podNamespacedName, libovsdbutil.ACLEgress) egressACL := libovsdbutil.BuildACL(egressIDs, types.PrimaryUDNAllowPriority, - egressMatch, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress) + egressMatch, nbdb.ACLActionAllow, nil, libovsdbutil.LportEgress, isolationTier) var err error if ingressMatch == "" && egressMatch == "" || parseErr != nil { @@ -288,8 +295,8 @@ func BuildAdvertisedNetworkSubnetsDropACL(advertisedNetworkSubnetsAddressSet add strings.Join(dropMatches, " || "), nbdb.ACLActionDrop, nil, - libovsdbutil.LportEgressAfterLB) - dropACL.Tier = types.PrimaryACLTier + libovsdbutil.LportEgressAfterLB, + isolationTier) return dropACL } @@ -331,8 +338,8 @@ func (bnc *BaseNetworkController) addAdvertisedNetworkIsolation(nodeName string) strings.Join(passMatches, " || "), nbdb.ACLActionPass, nil, - libovsdbutil.LportEgressAfterLB) - passACL.Tier = types.PrimaryACLTier + libovsdbutil.LportEgressAfterLB, + isolationTier) ops, err = libovsdbops.CreateOrUpdateACLsOps(bnc.nbClient, ops, nil, passACL) if err != nil { diff --git a/go-controller/pkg/ovn/udn_isolation_test.go b/go-controller/pkg/ovn/udn_isolation_test.go new 
file mode 100644 index 0000000000..2b3afda328 --- /dev/null +++ b/go-controller/pkg/ovn/udn_isolation_test.go @@ -0,0 +1,53 @@ +package ovn + +import ( + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" + libovsdbutil "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/util" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/nbdb" + libovsdbtest "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/testing/libovsdb" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("UDN Isolation", func() { + BeforeEach(func() { + Expect(config.PrepareTestConfig()).To(Succeed()) + }) + + It("ACLs should be updated to the Primary tier ", func() { + config.OVNKubernetesFeature.EnableMultiNetwork = true + config.OVNKubernetesFeature.EnableNetworkSegmentation = true + fakeController := getFakeController(DefaultNetworkControllerName) + + // build port group with one ACL that has default tier + pgIDs := fakeController.getSecondaryPodsPortGroupDbIDs() + pgName := libovsdbutil.GetPortGroupName(pgIDs) + egressDenyIDs := fakeController.getUDNACLDbIDs(DenySecondaryACL, libovsdbutil.ACLEgress) + match := libovsdbutil.GetACLMatch(pgName, "", libovsdbutil.ACLEgress) + // in the real code we use BuildACL here instead of BuildACLWithDefaultTier + egressDenyACL := libovsdbutil.BuildACLWithDefaultTier(egressDenyIDs, types.PrimaryUDNDenyPriority, match, nbdb.ACLActionDrop, + nil, libovsdbutil.LportEgress) + // required to make sure port group correctly references the ACL + egressDenyACL.UUID = egressDenyIDs.String() + "-UUID" + pg := libovsdbutil.BuildPortGroup(pgIDs, nil, []*nbdb.ACL{egressDenyACL}) + + nbClient, nbCleanup, err := libovsdbtest.NewNBTestHarness(libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{egressDenyACL, pg}, + }, nil) + Expect(err).NotTo(HaveOccurred()) + defer 
nbCleanup.Cleanup() + fakeController.nbClient = nbClient + + // now run the setupUDNACLs function which should create all ACLs and update the existing ACLs to the Primary tier + Expect(fakeController.setupUDNACLs(nil)).To(Succeed()) + + // verify that the egressDenyACL is updated to the Primary 0 + acls, err := libovsdbops.FindACLs(nbClient, []*nbdb.ACL{egressDenyACL}) + Expect(err).NotTo(HaveOccurred()) + Expect(acls).To(HaveLen(1)) + Expect(acls[0].Tier).To(Equal(types.PrimaryACLTier)) + }) +}) diff --git a/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go b/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go index 172cac5e33..b838221892 100644 --- a/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/chassis_handler.go @@ -10,7 +10,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go index 05b9fb6b9c..df74e807d1 100644 --- a/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/chassis_handler_test.go @@ -10,7 +10,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go index 3e4cfa458b..ab366c7931 100644 --- 
a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler.go @@ -12,7 +12,8 @@ import ( "k8s.io/klog/v2" utilnet "k8s.io/utils/net" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" libovsdbops "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/libovsdb/ops" @@ -207,7 +208,6 @@ func (zic *ZoneInterconnectHandler) AddLocalZoneNode(node *corev1.Node) error { // // See createRemoteZoneNodeResources() below for more details. func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error { start := time.Now() - klog.Infof("Creating interconnect resources for remote zone node %s for the network %s", node.Name, zic.GetNetworkName()) nodeID := util.GetNodeID(node) if nodeID == -1 { @@ -215,10 +215,53 @@ func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error { return fmt.Errorf("failed to get node id for node - %s", node.Name) } - if err := zic.createRemoteZoneNodeResources(node, nodeID); err != nil { + nodeSubnets, err := util.ParseNodeHostSubnetAnnotation(node, zic.GetNetworkName()) + if err != nil { + err = fmt.Errorf("failed to parse node %s subnets annotation %w", node.Name, err) + if util.IsAnnotationNotSetError(err) { + // remote node may not have the annotation yet, suppress it + return types.NewSuppressedError(err) + } + return err + } + + nodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node) + if err != nil || len(nodeTransitSwitchPortIPs) == 0 { + err = fmt.Errorf("failed to get the node transit switch port IP addresses : %w", err) + if util.IsAnnotationNotSetError(err) { + return types.NewSuppressedError(err) + } + return err + } + + var nodeGRPIPs []*net.IPNet + // only primary networks have cluster router connected to join switch+GR + // used for adding routes to 
GR + if !zic.IsSecondary() || (util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { + nodeGRPIPs, err = util.ParseNodeGatewayRouterJoinAddrs(node, zic.GetNetworkName()) + if err != nil { + if util.IsAnnotationNotSetError(err) { + // FIXME(tssurya): This is present for backwards compatibility + // Remove me a few months from now + var err1 error + nodeGRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) + if err1 != nil { + err1 = fmt.Errorf("failed to parse node %s Gateway router LRP Addrs annotation %w", node.Name, err1) + if util.IsAnnotationNotSetError(err1) { + return types.NewSuppressedError(err1) + } + return err1 + } + } + } + } + + klog.Infof("Creating interconnect resources for remote zone node %s for the network %s", node.Name, zic.GetNetworkName()) + + if err := zic.createRemoteZoneNodeResources(node, nodeID, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs); err != nil { return fmt.Errorf("creating interconnect resources for remote zone node %s for the network %s failed : err - %w", node.Name, zic.GetNetworkName(), err) } - klog.Infof("Creating Interconnect resources for node %v took: %s", node.Name, time.Since(start)) + klog.Infof("Creating Interconnect resources for node %q on network %q took: %s", node.Name, zic.GetNetworkName(), time.Since(start)) return nil } @@ -317,7 +360,7 @@ func (zic *ZoneInterconnectHandler) AddTransitPortConfig(remote bool, podAnnotat if port.Options == nil { port.Options = map[string]string{} } - port.Options["requested-tnl-key"] = strconv.Itoa(podAnnotation.TunnelID) + port.Options[libovsdbops.RequestedTnlKey] = strconv.Itoa(podAnnotation.TunnelID) if remote { port.Type = lportTypeRemote @@ -332,7 +375,7 @@ func (zic *ZoneInterconnectHandler) addTransitSwitchConfig(sw *nbdb.LogicalSwitc } sw.OtherConfig["interconn-ts"] = sw.Name - sw.OtherConfig["requested-tnl-key"] = strconv.Itoa(BaseTransitSwitchTunnelKey + networkID) + sw.OtherConfig[libovsdbops.RequestedTnlKey] = 
strconv.Itoa(BaseTransitSwitchTunnelKey + networkID) sw.OtherConfig["mcast_snoop"] = "true" sw.OtherConfig["mcast_querier"] = "false" sw.OtherConfig["mcast_flood_unregistered"] = "true" @@ -377,8 +420,8 @@ func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.No } lspOptions := map[string]string{ - "router-port": logicalRouterPortName, - "requested-tnl-key": strconv.Itoa(nodeID), + libovsdbops.RouterPort: logicalRouterPortName, + libovsdbops.RequestedTnlKey: strconv.Itoa(nodeID), } // Store the node name in the external_ids column for book keeping @@ -403,16 +446,7 @@ func (zic *ZoneInterconnectHandler) createLocalZoneNodeResources(node *corev1.No // if the node name is ovn-worker and the network name is blue, the logical port name would be - blue.tstor.ovn-worker // - binds the remote port to the node remote chassis in SBDB // - adds static routes for the remote node via the remote port ip in the ovn_cluster_router -func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int) error { - nodeTransitSwitchPortIPs, err := util.ParseNodeTransitSwitchPortAddrs(node) - if err != nil || len(nodeTransitSwitchPortIPs) == 0 { - err = fmt.Errorf("failed to get the node transit switch port IP addresses : %w", err) - if util.IsAnnotationNotSetError(err) { - return types.NewSuppressedError(err) - } - return err - } - +func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.Node, nodeID int, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs []*net.IPNet) error { transitRouterPortMac := util.IPAddrToHWAddr(nodeTransitSwitchPortIPs[0].IP) var transitRouterPortNetworks []string for _, ip := range nodeTransitSwitchPortIPs { @@ -425,8 +459,8 @@ func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.N } lspOptions := map[string]string{ - "requested-tnl-key": strconv.Itoa(nodeID), - "requested-chassis": node.Name, + libovsdbops.RequestedTnlKey: strconv.Itoa(nodeID), + 
libovsdbops.RequestedChassis: node.Name, } // Store the node name in the external_ids column for book keeping externalIDs := map[string]string{ @@ -438,7 +472,7 @@ func (zic *ZoneInterconnectHandler) createRemoteZoneNodeResources(node *corev1.N return err } - if err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs); err != nil { + if err := zic.addRemoteNodeStaticRoutes(node, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs); err != nil { return err } @@ -481,7 +515,9 @@ func (zic *ZoneInterconnectHandler) cleanupNode(nodeName string) error { return err } - // Delete any static routes in the cluster router for this node + // Delete any static routes in the cluster router for this node. + // skip types.NetworkExternalID check in the predicate function as this static route may be deleted + // before types.NetworkExternalID external-ids is set correctly during upgrade. p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { return lrsr.ExternalIDs["ic-node"] == nodeName } @@ -534,82 +570,57 @@ func (zic *ZoneInterconnectHandler) cleanupNodeTransitSwitchPort(nodeName string // Then the below static routes are added // ip4.dst == 10.244.0.0/24 , nexthop = 100.88.0.2 // ip4.dst == 100.64.0.2/16 , nexthop = 100.88.0.2 (only for default primary network) -func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs []*net.IPNet) error { +func (zic *ZoneInterconnectHandler) addRemoteNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs, nodeSubnets, nodeGRPIPs []*net.IPNet) error { + ops := make([]ovsdb.Operation, 0, 2) addRoute := func(prefix, nexthop string) error { logicalRouterStaticRoute := nbdb.LogicalRouterStaticRoute{ ExternalIDs: map[string]string{ - "ic-node": node.Name, + "ic-node": node.Name, + types.NetworkExternalID: zic.GetNetworkName(), }, Nexthop: nexthop, IPPrefix: prefix, } + // Note that because logical router static routes were originally created without types.NetworkExternalID + // 
external-ids, skip types.NetworkExternalID check in the predicate function to replace existing static route + // with correct external-ids on an upgrade scenario. p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { return lrsr.IPPrefix == prefix && lrsr.Nexthop == nexthop && lrsr.ExternalIDs["ic-node"] == node.Name } - if err := libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicate(zic.nbClient, zic.networkClusterRouterName, &logicalRouterStaticRoute, p); err != nil { - return fmt.Errorf("failed to create static route: %w", err) + var err error + ops, err = libovsdbops.CreateOrReplaceLogicalRouterStaticRouteWithPredicateOps(zic.nbClient, ops, zic.networkClusterRouterName, &logicalRouterStaticRoute, p) + if err != nil { + return fmt.Errorf("failed to create static route ops: %w", err) } return nil } - nodeSubnets, err := util.ParseNodeHostSubnetAnnotation(node, zic.GetNetworkName()) - if err != nil { - err = fmt.Errorf("failed to parse node %s subnets annotation %w", node.Name, err) - if util.IsAnnotationNotSetError(err) { - // remote node may not have the annotation yet, suppress it - return types.NewSuppressedError(err) - } - return err - } - nodeSubnetStaticRoutes := zic.getStaticRoutes(nodeSubnets, nodeTransitSwitchPortIPs, false) for _, staticRoute := range nodeSubnetStaticRoutes { - // Possible optimization: Add all the routes in one transaction if err := addRoute(staticRoute.prefix, staticRoute.nexthop); err != nil { return fmt.Errorf("error adding static route %s - %s to the router %s : %w", staticRoute.prefix, staticRoute.nexthop, zic.networkClusterRouterName, err) } } - if zic.IsSecondary() && !(util.IsNetworkSegmentationSupportEnabled() && zic.IsPrimaryNetwork()) { - // Secondary network cluster router doesn't connect to a join switch - // or to a Gateway router. - // - // Except for UDN primary L3 networks. 
- return nil - } - - nodeGRPIPs, err := util.ParseNodeGatewayRouterJoinAddrs(node, zic.GetNetworkName()) - if err != nil { - if util.IsAnnotationNotSetError(err) { - // FIXME(tssurya): This is present for backwards compatibility - // Remove me a few months from now - var err1 error - nodeGRPIPs, err1 = util.ParseNodeGatewayRouterLRPAddrs(node) - if err1 != nil { - err1 = fmt.Errorf("failed to parse node %s Gateway router LRP Addrs annotation %w", node.Name, err1) - if util.IsAnnotationNotSetError(err1) { - return types.NewSuppressedError(err1) - } - return err1 + if len(nodeGRPIPs) > 0 { + nodeGRPIPStaticRoutes := zic.getStaticRoutes(nodeGRPIPs, nodeTransitSwitchPortIPs, true) + for _, staticRoute := range nodeGRPIPStaticRoutes { + if err := addRoute(staticRoute.prefix, staticRoute.nexthop); err != nil { + return fmt.Errorf("error adding static route %s - %s to the router %s : %w", staticRoute.prefix, staticRoute.nexthop, zic.networkClusterRouterName, err) } } } - nodeGRPIPStaticRoutes := zic.getStaticRoutes(nodeGRPIPs, nodeTransitSwitchPortIPs, true) - for _, staticRoute := range nodeGRPIPStaticRoutes { - // Possible optimization: Add all the routes in one transaction - if err := addRoute(staticRoute.prefix, staticRoute.nexthop); err != nil { - return fmt.Errorf("error adding static route %s - %s to the router %s : %w", staticRoute.prefix, staticRoute.nexthop, zic.networkClusterRouterName, err) - } - } - - return nil + _, err := libovsdbops.TransactAndCheck(zic.nbClient, ops) + return err } // deleteLocalNodeStaticRoutes deletes the static routes added by the function addRemoteNodeStaticRoutes func (zic *ZoneInterconnectHandler) deleteLocalNodeStaticRoutes(node *corev1.Node, nodeTransitSwitchPortIPs []*net.IPNet) error { + // skip types.NetworkExternalID check in the predicate function as this static route may be deleted + // before types.NetworkExternalID external-ids is set correctly during upgrade. 
deleteRoute := func(prefix, nexthop string) error { p := func(lrsr *nbdb.LogicalRouterStaticRoute) bool { return lrsr.IPPrefix == prefix && diff --git a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go index e2cbeb3c8b..c0a54a1d61 100644 --- a/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go +++ b/go-controller/pkg/ovn/zone_interconnect/zone_ic_handler_test.go @@ -13,7 +13,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ovncnitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cni/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" @@ -89,6 +89,15 @@ func invokeICHandlerAddNodeFunction(zone string, icHandler *ZoneInterconnectHand return nil } +func invokeICHandlerDeleteNodeFunction(icHandler *ZoneInterconnectHandler, nodes ...*corev1.Node) error { + for _, node := range nodes { + err := icHandler.DeleteNode(node) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + return nil +} + func checkInterconnectResources(zone string, netName string, nbClient libovsdbclient.Client, testNodesRouteInfo map[string]map[string]string, nodes ...*corev1.Node) error { localZoneNodes := []*corev1.Node{} remoteZoneNodes := []*corev1.Node{} @@ -250,6 +259,7 @@ var _ = ginkgo.Describe("Zone Interconnect Operations", func() { initialNBDB []libovsdbtest.TestData initialSBDB []libovsdbtest.TestData testNodesRouteInfo map[string]map[string]string + nodeRouteInfoMap map[string]map[string]map[string]string ) const ( @@ -736,6 +746,137 @@ var _ = ginkgo.Describe("Zone Interconnect Operations", func() { }) }) + ginkgo.Context("Two secondary networks", func() { + ginkgo.BeforeEach(func() { + testNode1 = corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Annotations: map[string]string{ + 
ovnNodeChassisIDAnnotatin: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac6", + ovnNodeZoneNameAnnotation: "global", + ovnNodeIDAnnotaton: "2", + ovnNodeSubnetsAnnotation: "{\"red\":[\"10.244.2.0/24\"], \"blue\":[\"11.244.2.0/24\"]}", + ovnTransitSwitchPortAddrAnnotation: "{\"ipv4\":\"100.88.0.2/16\"}", + util.OVNNodeGRLRPAddrs: "{\"default\":{\"ipv4\":\"100.64.0.2/16\"}}", + ovnNodeNetworkIDsAnnotation: "{\"red\":\"2\", \"blue\":\"1\"}", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "10.0.0.10"}}, + }, + } + // node2 is a remote zone node + testNode2 = corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Annotations: map[string]string{ + ovnNodeChassisIDAnnotatin: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac7", + ovnNodeZoneNameAnnotation: "foo", + ovnNodeIDAnnotaton: "3", + ovnNodeSubnetsAnnotation: "{\"red\":[\"10.244.3.0/24\"], \"blue\":[\"11.244.3.0/24\"]}", + ovnTransitSwitchPortAddrAnnotation: "{\"ipv4\":\"100.88.0.3/16\"}", + util.OVNNodeGRLRPAddrs: "{\"defalut\":{\"ipv4\":\"100.64.0.3/16\"}}", + ovnNodeNetworkIDsAnnotation: "{\"red\":\"2\", \"blue\":\"1\"}", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "10.0.0.11"}}, + }, + } + // node3 is a remote zone node + testNode3 = corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node3", + Annotations: map[string]string{ + ovnNodeChassisIDAnnotatin: "cb9ec8fa-b409-4ef3-9f42-d9283c47aac8", + ovnNodeZoneNameAnnotation: "foo", + ovnNodeIDAnnotaton: "4", + ovnNodeSubnetsAnnotation: "{\"red\":[\"10.244.4.0/24\"], \"blue\":[\"11.244.4.0/24\"]}", + ovnTransitSwitchPortAddrAnnotation: "{\"ipv4\":\"100.88.0.4/16\"}", + util.OVNNodeGRLRPAddrs: "{\"default\":{\"ipv4\":\"100.64.0.4/16\"}}", + ovnNodeNetworkIDsAnnotation: "{\"red\":\"2\", \"blue\":\"1\"}", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{{Type: corev1.NodeInternalIP, Address: "10.0.0.12"}}, + }, + } + + 
nodeRouteInfoMap = map[string]map[string]map[string]string{ + "red": { + "node1": {"node-subnets": "10.244.2.0/24", "ts-ip": "100.88.0.2", "host-route": "100.64.0.2/32"}, + "node2": {"node-subnets": "10.244.3.0/24", "ts-ip": "100.88.0.3", "host-route": "100.64.0.3/32"}, + "node3": {"node-subnets": "10.244.4.0/24", "ts-ip": "100.88.0.4", "host-route": "100.64.0.4/32"}, + }, + "blue": { + "node1": {"node-subnets": "11.244.2.0/24", "ts-ip": "100.88.0.2", "host-route": "100.64.0.2/32"}, + "node2": {"node-subnets": "11.244.3.0/24", "ts-ip": "100.88.0.3", "host-route": "100.64.0.3/32"}, + "node3": {"node-subnets": "11.244.4.0/24", "ts-ip": "100.88.0.4", "host-route": "100.64.0.4/32"}, + }, + } + initialNBDB = []libovsdbtest.TestData{ + newOVNClusterRouter("blue"), + newOVNClusterRouter("red"), + } + + initialSBDB = []libovsdbtest.TestData{ + &node1Chassis, &node2Chassis, &node3Chassis} + }) + + ginkgo.It("Delete remote node", func() { + app.Action = func(ctx *cli.Context) error { + dbSetup := libovsdbtest.TestSetup{ + NBData: initialNBDB, + SBData: initialSBDB, + } + + _, err := config.InitConfig(ctx, nil, nil) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + config.Kubernetes.HostNetworkNamespace = "" + + var libovsdbOvnNBClient, libovsdbOvnSBClient libovsdbclient.Client + libovsdbOvnNBClient, libovsdbOvnSBClient, libovsdbCleanup, err = libovsdbtest.NewNBSBTestHarness(dbSetup) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + zoneICHandler := map[string]*ZoneInterconnectHandler{} + for _, netName := range []string{"red", "blue"} { + err = createTransitSwitchPortBindings(libovsdbOvnSBClient, netName, &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + netInfo, err := util.NewNetInfo(&ovncnitypes.NetConf{NetConf: cnitypes.NetConf{Name: netName}, Topology: types.Layer3Topology}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + zoneICHandler[netName] = NewZoneInterconnectHandler(netInfo, libovsdbOvnNBClient, 
libovsdbOvnSBClient, nil) + err = zoneICHandler[netName].createOrUpdateTransitSwitch(1) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = invokeICHandlerAddNodeFunction("global", zoneICHandler[netName], &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = checkInterconnectResources("global", netName, libovsdbOvnNBClient, nodeRouteInfoMap[netName], &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + + // Check the logical entities are as expected when a remote node is deleted + ginkgo.By("Delete remote node \"red\"") + delete(nodeRouteInfoMap["red"], "node3") + err = invokeICHandlerDeleteNodeFunction(zoneICHandler["red"], &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = checkInterconnectResources("global", "red", libovsdbOvnNBClient, nodeRouteInfoMap["red"], &testNode1, &testNode2) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + err = checkInterconnectResources("global", "blue", libovsdbOvnNBClient, nodeRouteInfoMap["blue"], &testNode1, &testNode2, &testNode3) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + return nil + } + + err := app.Run([]string{ + app.Name, + "-cluster-subnets=" + clusterCIDR, + "-init-cluster-manager", + "-zone-join-switch-subnets=" + joinSubnetCIDR, + "-enable-interconnect", + }) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + ginkgo.Context("Error scenarios", func() { ginkgo.It("Missing annotations and error scenarios for local node", func() { app.Action = func(ctx *cli.Context) error { diff --git a/go-controller/pkg/ovndbmanager/ovndbmanager.go b/go-controller/pkg/ovndbmanager/ovndbmanager.go index 2c5fac6ab9..7e26bb2b98 100644 --- a/go-controller/pkg/ovndbmanager/ovndbmanager.go +++ b/go-controller/pkg/ovndbmanager/ovndbmanager.go @@ -226,7 +226,7 @@ func ensureClusterRaftMembership(db *util.OvsDbProperties, kclient kube.Interfac r = regexp.MustCompile(`([a-z0-9]{4}) at ` + dbServerRegexp) members := 
r.FindAllStringSubmatch(out, -1) kickedMembersCount := 0 - dbPods, err := kclient.GetPods(config.Kubernetes.OVNConfigNamespace, metav1.ListOptions{ + dbPods, err := kclient.GetPodsForDBChecker(config.Kubernetes.OVNConfigNamespace, metav1.ListOptions{ LabelSelector: labels.Set(map[string]string{"ovn-db-pod": "true"}).String(), }) if err != nil { diff --git a/go-controller/pkg/ovnwebhook/nodeadmission.go b/go-controller/pkg/ovnwebhook/nodeadmission.go index b21a51bc87..08509903c9 100644 --- a/go-controller/pkg/ovnwebhook/nodeadmission.go +++ b/go-controller/pkg/ovnwebhook/nodeadmission.go @@ -34,6 +34,7 @@ var commonNodeAnnotationChecks = map[string]checkNodeAnnot{ util.OvnNodeMasqCIDR: nil, util.OvnNodeGatewayMtuSupport: nil, util.OvnNodeManagementPort: nil, + util.OvnNodeDontSNATSubnets: nil, util.OvnNodeChassisID: func(v annotationChange, _ string) error { if v.action == removed { return fmt.Errorf("%s cannot be removed", util.OvnNodeChassisID) diff --git a/go-controller/pkg/retry/obj_retry.go b/go-controller/pkg/retry/obj_retry.go index 46484ae4fe..fc17d07b50 100644 --- a/go-controller/pkg/retry/obj_retry.go +++ b/go-controller/pkg/retry/obj_retry.go @@ -9,14 +9,11 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/factory" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/kube" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/metrics" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/syncmap" ovntypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" @@ -772,38 +769,35 @@ func (r *RetryFramework) WatchResourceFiltered(namespaceForFilteredHandler strin return handler, nil } -// getPendingPods returns all pods that are in the Pending state -func getPendingPods(kubeClient kube.InterfaceOVN) 
([]*corev1.Pod, error) { - var allPods []*corev1.Pod - - pods, err := kubeClient.GetPods(corev1.NamespaceAll, metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector("status.phase", string(corev1.PodPending)).String(), - }) - if err != nil { - return nil, err - } - allPods = append(allPods, pods...) - return allPods, nil -} - // RequeuePendingPods enqueues all Pending pods into the retryPods associated with netInfo. -func RequeuePendingPods(kubeClient kube.InterfaceOVN, netInfo util.NetInfo, retryPods *RetryFramework) error { +func RequeuePendingPods(wf *factory.WatchFactory, netInfo util.NetInfo, retryPods *RetryFramework) error { var errs []error // NOTE: A pod may reference a NAD from a different namespace, so check all pending pods. - allPods, err := getPendingPods(kubeClient) + allPods, err := wf.GetAllPods() if err != nil { - return err + return fmt.Errorf("failed to get all pods: %w", err) } + podsAdded := false for _, pod := range allPods { pod := *pod + if !util.PodScheduled(&pod) { + continue + } + if pod.Status.Phase != corev1.PodPending { + continue + } klog.V(5).Infof("Adding pending pod %s/%s to retryPods for network %s", pod.Namespace, pod.Name, netInfo.GetNetworkName()) err := retryPods.AddRetryObjWithAddNoBackoff(&pod) if err != nil { errs = append(errs, err) + continue } + podsAdded = true + } + if podsAdded { + retryPods.RequestRetryObjs() } - retryPods.RequestRetryObjs() return utilerrors.Join(errs...) } diff --git a/go-controller/pkg/sbdb/acl_id.go b/go-controller/pkg/sbdb/acl_id.go new file mode 100644 index 0000000000..5c62c53fe2 --- /dev/null +++ b/go-controller/pkg/sbdb/acl_id.go @@ -0,0 +1,54 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const ACLIDTable = "ACL_ID" + +// ACLID defines an object in ACL_ID table +type ACLID struct { + UUID string `ovsdb:"_uuid"` + ID int `ovsdb:"id"` +} + +func (a *ACLID) GetUUID() string { + return a.UUID +} + +func (a *ACLID) GetID() int { + return a.ID +} + +func (a *ACLID) DeepCopyInto(b *ACLID) { + *b = *a +} + +func (a *ACLID) DeepCopy() *ACLID { + b := new(ACLID) + a.DeepCopyInto(b) + return b +} + +func (a *ACLID) CloneModelInto(b model.Model) { + c := b.(*ACLID) + a.DeepCopyInto(c) +} + +func (a *ACLID) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ACLID) Equals(b *ACLID) bool { + return a.UUID == b.UUID && + a.ID == b.ID +} + +func (a *ACLID) EqualsModel(b model.Model) bool { + c := b.(*ACLID) + return a.Equals(c) +} + +var _ model.CloneableModel = &ACLID{} +var _ model.ComparableModel = &ACLID{} diff --git a/go-controller/pkg/sbdb/address_set.go b/go-controller/pkg/sbdb/address_set.go index b3b1c3c2d8..88b221dedf 100644 --- a/go-controller/pkg/sbdb/address_set.go +++ b/go-controller/pkg/sbdb/address_set.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const AddressSetTable = "Address_Set" diff --git a/go-controller/pkg/sbdb/advertised_route.go b/go-controller/pkg/sbdb/advertised_route.go new file mode 100644 index 0000000000..6704be7d4d --- /dev/null +++ b/go-controller/pkg/sbdb/advertised_route.go @@ -0,0 +1,124 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const AdvertisedRouteTable = "Advertised_Route" + +// AdvertisedRoute defines an object in Advertised_Route table +type AdvertisedRoute struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + LogicalPort string `ovsdb:"logical_port"` + TrackedPort *string `ovsdb:"tracked_port"` +} + +func (a *AdvertisedRoute) GetUUID() string { + return a.UUID +} + +func (a *AdvertisedRoute) GetDatapath() string { + return a.Datapath +} + +func (a *AdvertisedRoute) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyAdvertisedRouteExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalAdvertisedRouteExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *AdvertisedRoute) GetIPPrefix() string { + return a.IPPrefix +} + +func (a *AdvertisedRoute) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *AdvertisedRoute) GetTrackedPort() *string { + return a.TrackedPort +} + +func copyAdvertisedRouteTrackedPort(a *string) *string { + if a == nil { + return nil + } + b := *a + return &b +} + +func equalAdvertisedRouteTrackedPort(a, b *string) bool { + if (a == nil) != (b == nil) { + return false + } + if a == b { + return true + } + return *a == *b +} + +func (a *AdvertisedRoute) DeepCopyInto(b *AdvertisedRoute) { + *b = *a + b.ExternalIDs = copyAdvertisedRouteExternalIDs(a.ExternalIDs) + b.TrackedPort = copyAdvertisedRouteTrackedPort(a.TrackedPort) +} + +func (a *AdvertisedRoute) DeepCopy() *AdvertisedRoute { + b := new(AdvertisedRoute) + 
a.DeepCopyInto(b) + return b +} + +func (a *AdvertisedRoute) CloneModelInto(b model.Model) { + c := b.(*AdvertisedRoute) + a.DeepCopyInto(c) +} + +func (a *AdvertisedRoute) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *AdvertisedRoute) Equals(b *AdvertisedRoute) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalAdvertisedRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IPPrefix == b.IPPrefix && + a.LogicalPort == b.LogicalPort && + equalAdvertisedRouteTrackedPort(a.TrackedPort, b.TrackedPort) +} + +func (a *AdvertisedRoute) EqualsModel(b model.Model) bool { + c := b.(*AdvertisedRoute) + return a.Equals(c) +} + +var _ model.CloneableModel = &AdvertisedRoute{} +var _ model.ComparableModel = &AdvertisedRoute{} diff --git a/go-controller/pkg/sbdb/bfd.go b/go-controller/pkg/sbdb/bfd.go index cf27814b51..eb3822e902 100644 --- a/go-controller/pkg/sbdb/bfd.go +++ b/go-controller/pkg/sbdb/bfd.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BFDTable = "BFD" diff --git a/go-controller/pkg/sbdb/chassis.go b/go-controller/pkg/sbdb/chassis.go index 3526f096f2..3cbffee206 100644 --- a/go-controller/pkg/sbdb/chassis.go +++ b/go-controller/pkg/sbdb/chassis.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisTable = "Chassis" diff --git a/go-controller/pkg/sbdb/chassis_private.go b/go-controller/pkg/sbdb/chassis_private.go index 1e8c3764bd..dc848a1569 100644 --- a/go-controller/pkg/sbdb/chassis_private.go +++ b/go-controller/pkg/sbdb/chassis_private.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisPrivateTable = "Chassis_Private" diff --git a/go-controller/pkg/sbdb/chassis_template_var.go b/go-controller/pkg/sbdb/chassis_template_var.go index 212e772be6..2e8213ade8 100644 --- 
a/go-controller/pkg/sbdb/chassis_template_var.go +++ b/go-controller/pkg/sbdb/chassis_template_var.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ChassisTemplateVarTable = "Chassis_Template_Var" diff --git a/go-controller/pkg/sbdb/connection.go b/go-controller/pkg/sbdb/connection.go index 8f96f54226..2deb8bd30a 100644 --- a/go-controller/pkg/sbdb/connection.go +++ b/go-controller/pkg/sbdb/connection.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ConnectionTable = "Connection" diff --git a/go-controller/pkg/sbdb/controller_event.go b/go-controller/pkg/sbdb/controller_event.go index 741ffd028a..0233181ca6 100644 --- a/go-controller/pkg/sbdb/controller_event.go +++ b/go-controller/pkg/sbdb/controller_event.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ControllerEventTable = "Controller_Event" diff --git a/go-controller/pkg/sbdb/datapath_binding.go b/go-controller/pkg/sbdb/datapath_binding.go index 10247286f7..295660e9c3 100644 --- a/go-controller/pkg/sbdb/datapath_binding.go +++ b/go-controller/pkg/sbdb/datapath_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DatapathBindingTable = "Datapath_Binding" diff --git a/go-controller/pkg/sbdb/dhcp_options.go b/go-controller/pkg/sbdb/dhcp_options.go index e9ec44ce29..e0bb7627f1 100644 --- a/go-controller/pkg/sbdb/dhcp_options.go +++ b/go-controller/pkg/sbdb/dhcp_options.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPOptionsTable = "DHCP_Options" diff --git a/go-controller/pkg/sbdb/dhcpv6_options.go b/go-controller/pkg/sbdb/dhcpv6_options.go index 908d1e0ad0..95a2a8d8f4 100644 --- 
a/go-controller/pkg/sbdb/dhcpv6_options.go +++ b/go-controller/pkg/sbdb/dhcpv6_options.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DHCPv6OptionsTable = "DHCPv6_Options" diff --git a/go-controller/pkg/sbdb/dns.go b/go-controller/pkg/sbdb/dns.go index 95c0a52d1e..c044f990b0 100644 --- a/go-controller/pkg/sbdb/dns.go +++ b/go-controller/pkg/sbdb/dns.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DNSTable = "DNS" diff --git a/go-controller/pkg/sbdb/ecmp_nexthop.go b/go-controller/pkg/sbdb/ecmp_nexthop.go new file mode 100644 index 0000000000..2b0124a788 --- /dev/null +++ b/go-controller/pkg/sbdb/ecmp_nexthop.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. + +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const ECMPNexthopTable = "ECMP_Nexthop" + +// ECMPNexthop defines an object in ECMP_Nexthop table +type ECMPNexthop struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + MAC string `ovsdb:"mac"` + Nexthop string `ovsdb:"nexthop"` + Port string `ovsdb:"port"` +} + +func (a *ECMPNexthop) GetUUID() string { + return a.UUID +} + +func (a *ECMPNexthop) GetDatapath() string { + return a.Datapath +} + +func (a *ECMPNexthop) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyECMPNexthopExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalECMPNexthopExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *ECMPNexthop) GetMAC() 
string { + return a.MAC +} + +func (a *ECMPNexthop) GetNexthop() string { + return a.Nexthop +} + +func (a *ECMPNexthop) GetPort() string { + return a.Port +} + +func (a *ECMPNexthop) DeepCopyInto(b *ECMPNexthop) { + *b = *a + b.ExternalIDs = copyECMPNexthopExternalIDs(a.ExternalIDs) +} + +func (a *ECMPNexthop) DeepCopy() *ECMPNexthop { + b := new(ECMPNexthop) + a.DeepCopyInto(b) + return b +} + +func (a *ECMPNexthop) CloneModelInto(b model.Model) { + c := b.(*ECMPNexthop) + a.DeepCopyInto(c) +} + +func (a *ECMPNexthop) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *ECMPNexthop) Equals(b *ECMPNexthop) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalECMPNexthopExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.MAC == b.MAC && + a.Nexthop == b.Nexthop && + a.Port == b.Port +} + +func (a *ECMPNexthop) EqualsModel(b model.Model) bool { + c := b.(*ECMPNexthop) + return a.Equals(c) +} + +var _ model.CloneableModel = &ECMPNexthop{} +var _ model.ComparableModel = &ECMPNexthop{} diff --git a/go-controller/pkg/sbdb/encap.go b/go-controller/pkg/sbdb/encap.go index 9a2f17fba2..4c524a52ca 100644 --- a/go-controller/pkg/sbdb/encap.go +++ b/go-controller/pkg/sbdb/encap.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const EncapTable = "Encap" diff --git a/go-controller/pkg/sbdb/fdb.go b/go-controller/pkg/sbdb/fdb.go index 8253e7059b..346593ac6f 100644 --- a/go-controller/pkg/sbdb/fdb.go +++ b/go-controller/pkg/sbdb/fdb.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FDBTable = "FDB" diff --git a/go-controller/pkg/sbdb/gateway_chassis.go b/go-controller/pkg/sbdb/gateway_chassis.go index a84ad7fc47..f08883222d 100644 --- a/go-controller/pkg/sbdb/gateway_chassis.go +++ b/go-controller/pkg/sbdb/gateway_chassis.go @@ -3,7 +3,7 @@ package sbdb -import 
"github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const GatewayChassisTable = "Gateway_Chassis" diff --git a/go-controller/pkg/sbdb/ha_chassis.go b/go-controller/pkg/sbdb/ha_chassis.go index b0b3cebbba..b40d7999e3 100644 --- a/go-controller/pkg/sbdb/ha_chassis.go +++ b/go-controller/pkg/sbdb/ha_chassis.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisTable = "HA_Chassis" diff --git a/go-controller/pkg/sbdb/ha_chassis_group.go b/go-controller/pkg/sbdb/ha_chassis_group.go index 1cc013c705..72a5622f5b 100644 --- a/go-controller/pkg/sbdb/ha_chassis_group.go +++ b/go-controller/pkg/sbdb/ha_chassis_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const HAChassisGroupTable = "HA_Chassis_Group" diff --git a/go-controller/pkg/sbdb/igmp_group.go b/go-controller/pkg/sbdb/igmp_group.go index 73a0bb9437..19381eb855 100644 --- a/go-controller/pkg/sbdb/igmp_group.go +++ b/go-controller/pkg/sbdb/igmp_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const IGMPGroupTable = "IGMP_Group" diff --git a/go-controller/pkg/sbdb/ip_multicast.go b/go-controller/pkg/sbdb/ip_multicast.go index 493cd342d2..902b7204f1 100644 --- a/go-controller/pkg/sbdb/ip_multicast.go +++ b/go-controller/pkg/sbdb/ip_multicast.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const IPMulticastTable = "IP_Multicast" diff --git a/go-controller/pkg/sbdb/learned_route.go b/go-controller/pkg/sbdb/learned_route.go new file mode 100644 index 0000000000..8cab3636de --- /dev/null +++ b/go-controller/pkg/sbdb/learned_route.go @@ -0,0 +1,105 @@ +// Code generated by "libovsdb.modelgen" +// DO NOT EDIT. 
+ +package sbdb + +import "github.com/ovn-kubernetes/libovsdb/model" + +const LearnedRouteTable = "Learned_Route" + +// LearnedRoute defines an object in Learned_Route table +type LearnedRoute struct { + UUID string `ovsdb:"_uuid"` + Datapath string `ovsdb:"datapath"` + ExternalIDs map[string]string `ovsdb:"external_ids"` + IPPrefix string `ovsdb:"ip_prefix"` + LogicalPort string `ovsdb:"logical_port"` + Nexthop string `ovsdb:"nexthop"` +} + +func (a *LearnedRoute) GetUUID() string { + return a.UUID +} + +func (a *LearnedRoute) GetDatapath() string { + return a.Datapath +} + +func (a *LearnedRoute) GetExternalIDs() map[string]string { + return a.ExternalIDs +} + +func copyLearnedRouteExternalIDs(a map[string]string) map[string]string { + if a == nil { + return nil + } + b := make(map[string]string, len(a)) + for k, v := range a { + b[k] = v + } + return b +} + +func equalLearnedRouteExternalIDs(a, b map[string]string) bool { + if (a == nil) != (b == nil) { + return false + } + if len(a) != len(b) { + return false + } + for k, v := range a { + if w, ok := b[k]; !ok || v != w { + return false + } + } + return true +} + +func (a *LearnedRoute) GetIPPrefix() string { + return a.IPPrefix +} + +func (a *LearnedRoute) GetLogicalPort() string { + return a.LogicalPort +} + +func (a *LearnedRoute) GetNexthop() string { + return a.Nexthop +} + +func (a *LearnedRoute) DeepCopyInto(b *LearnedRoute) { + *b = *a + b.ExternalIDs = copyLearnedRouteExternalIDs(a.ExternalIDs) +} + +func (a *LearnedRoute) DeepCopy() *LearnedRoute { + b := new(LearnedRoute) + a.DeepCopyInto(b) + return b +} + +func (a *LearnedRoute) CloneModelInto(b model.Model) { + c := b.(*LearnedRoute) + a.DeepCopyInto(c) +} + +func (a *LearnedRoute) CloneModel() model.Model { + return a.DeepCopy() +} + +func (a *LearnedRoute) Equals(b *LearnedRoute) bool { + return a.UUID == b.UUID && + a.Datapath == b.Datapath && + equalLearnedRouteExternalIDs(a.ExternalIDs, b.ExternalIDs) && + a.IPPrefix == b.IPPrefix && + 
a.LogicalPort == b.LogicalPort && + a.Nexthop == b.Nexthop +} + +func (a *LearnedRoute) EqualsModel(b model.Model) bool { + c := b.(*LearnedRoute) + return a.Equals(c) +} + +var _ model.CloneableModel = &LearnedRoute{} +var _ model.ComparableModel = &LearnedRoute{} diff --git a/go-controller/pkg/sbdb/load_balancer.go b/go-controller/pkg/sbdb/load_balancer.go index bc341807e7..7bf4da265a 100644 --- a/go-controller/pkg/sbdb/load_balancer.go +++ b/go-controller/pkg/sbdb/load_balancer.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LoadBalancerTable = "Load_Balancer" diff --git a/go-controller/pkg/sbdb/logical_dp_group.go b/go-controller/pkg/sbdb/logical_dp_group.go index 911de2eed0..86727f4486 100644 --- a/go-controller/pkg/sbdb/logical_dp_group.go +++ b/go-controller/pkg/sbdb/logical_dp_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalDPGroupTable = "Logical_DP_Group" diff --git a/go-controller/pkg/sbdb/logical_flow.go b/go-controller/pkg/sbdb/logical_flow.go index 42af1cdf54..da2341990d 100644 --- a/go-controller/pkg/sbdb/logical_flow.go +++ b/go-controller/pkg/sbdb/logical_flow.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const LogicalFlowTable = "Logical_Flow" diff --git a/go-controller/pkg/sbdb/mac_binding.go b/go-controller/pkg/sbdb/mac_binding.go index 705431f1d0..9764c6dc35 100644 --- a/go-controller/pkg/sbdb/mac_binding.go +++ b/go-controller/pkg/sbdb/mac_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MACBindingTable = "MAC_Binding" diff --git a/go-controller/pkg/sbdb/meter.go b/go-controller/pkg/sbdb/meter.go index 95c4daec2f..9d86874c0b 100644 --- a/go-controller/pkg/sbdb/meter.go +++ 
b/go-controller/pkg/sbdb/meter.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterTable = "Meter" diff --git a/go-controller/pkg/sbdb/meter_band.go b/go-controller/pkg/sbdb/meter_band.go index addb01b645..10d3d740f8 100644 --- a/go-controller/pkg/sbdb/meter_band.go +++ b/go-controller/pkg/sbdb/meter_band.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MeterBandTable = "Meter_Band" diff --git a/go-controller/pkg/sbdb/mirror.go b/go-controller/pkg/sbdb/mirror.go index 69444ea735..b9139214ca 100644 --- a/go-controller/pkg/sbdb/mirror.go +++ b/go-controller/pkg/sbdb/mirror.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MirrorTable = "Mirror" diff --git a/go-controller/pkg/sbdb/model.go b/go-controller/pkg/sbdb/model.go index bc838fe497..0d9fe177bf 100644 --- a/go-controller/pkg/sbdb/model.go +++ b/go-controller/pkg/sbdb/model.go @@ -6,14 +6,16 @@ package sbdb import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb func FullDatabaseModel() (model.ClientDBModel, error) { return model.NewClientDBModel("OVN_Southbound", map[string]model.Model{ + "ACL_ID": &ACLID{}, "Address_Set": &AddressSet{}, + "Advertised_Route": &AdvertisedRoute{}, "BFD": &BFD{}, "Chassis": &Chassis{}, "Chassis_Private": &ChassisPrivate{}, @@ -24,6 +26,7 @@ func FullDatabaseModel() (model.ClientDBModel, error) { "DHCPv6_Options": &DHCPv6Options{}, "DNS": &DNS{}, "Datapath_Binding": &DatapathBinding{}, + "ECMP_Nexthop": &ECMPNexthop{}, "Encap": &Encap{}, "FDB": &FDB{}, "Gateway_Chassis": &GatewayChassis{}, @@ -31,6 +34,7 @@ func 
FullDatabaseModel() (model.ClientDBModel, error) { "HA_Chassis_Group": &HAChassisGroup{}, "IGMP_Group": &IGMPGroup{}, "IP_Multicast": &IPMulticast{}, + "Learned_Route": &LearnedRoute{}, "Load_Balancer": &LoadBalancer{}, "Logical_DP_Group": &LogicalDPGroup{}, "Logical_Flow": &LogicalFlow{}, @@ -52,8 +56,22 @@ func FullDatabaseModel() (model.ClientDBModel, error) { var schema = `{ "name": "OVN_Southbound", - "version": "20.37.0", + "version": "20.41.0", "tables": { + "ACL_ID": { + "columns": { + "id": { + "type": { + "key": { + "type": "integer", + "minInteger": 0, + "maxInteger": 32767 + } + } + } + }, + "isRoot": true + }, "Address_Set": { "columns": { "addresses": { @@ -76,6 +94,63 @@ var schema = `{ ], "isRoot": true }, + "Advertised_Route": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "strong" + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_prefix": { + "type": "string" + }, + "logical_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + } + } + }, + "tracked_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + }, + "min": 0, + "max": 1 + } + } + }, + "indexes": [ + [ + "datapath", + "logical_port", + "ip_prefix", + "tracked_port" + ] + ], + "isRoot": true + }, "BFD": { "columns": { "chassis_name": { @@ -576,6 +651,57 @@ var schema = `{ ], "isRoot": true }, + "ECMP_Nexthop": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "strong" + }, + "min": 1, + "max": 1 + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "mac": { + "type": "string" + }, + "nexthop": { + "type": "string" + }, + "port": { + 
"type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + }, + "min": 1, + "max": 1 + } + } + }, + "indexes": [ + [ + "nexthop", + "port" + ] + ], + "isRoot": true + }, "Encap": { "columns": { "chassis_name": { @@ -932,6 +1058,55 @@ var schema = `{ ], "isRoot": true }, + "Learned_Route": { + "columns": { + "datapath": { + "type": { + "key": { + "type": "uuid", + "refTable": "Datapath_Binding", + "refType": "strong" + } + } + }, + "external_ids": { + "type": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "min": 0, + "max": "unlimited" + } + }, + "ip_prefix": { + "type": "string" + }, + "logical_port": { + "type": { + "key": { + "type": "uuid", + "refTable": "Port_Binding", + "refType": "strong" + } + } + }, + "nexthop": { + "type": "string" + } + }, + "indexes": [ + [ + "datapath", + "logical_port", + "ip_prefix", + "nexthop" + ] + ], + "isRoot": true + }, "Load_Balancer": { "columns": { "datapath_group": { @@ -1741,6 +1916,9 @@ var schema = `{ "ssl_ciphers": { "type": "string" }, + "ssl_ciphersuites": { + "type": "string" + }, "ssl_protocols": { "type": "string" } diff --git a/go-controller/pkg/sbdb/multicast_group.go b/go-controller/pkg/sbdb/multicast_group.go index 1af933ea6c..b8e2a828d9 100644 --- a/go-controller/pkg/sbdb/multicast_group.go +++ b/go-controller/pkg/sbdb/multicast_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MulticastGroupTable = "Multicast_Group" diff --git a/go-controller/pkg/sbdb/port_binding.go b/go-controller/pkg/sbdb/port_binding.go index b3d30f843a..48668023fc 100644 --- a/go-controller/pkg/sbdb/port_binding.go +++ b/go-controller/pkg/sbdb/port_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortBindingTable = "Port_Binding" diff --git a/go-controller/pkg/sbdb/port_group.go 
b/go-controller/pkg/sbdb/port_group.go index 358e26b33d..e197ae6e4d 100644 --- a/go-controller/pkg/sbdb/port_group.go +++ b/go-controller/pkg/sbdb/port_group.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortGroupTable = "Port_Group" diff --git a/go-controller/pkg/sbdb/rbac_permission.go b/go-controller/pkg/sbdb/rbac_permission.go index 9d760527e9..228c56bfe8 100644 --- a/go-controller/pkg/sbdb/rbac_permission.go +++ b/go-controller/pkg/sbdb/rbac_permission.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const RBACPermissionTable = "RBAC_Permission" diff --git a/go-controller/pkg/sbdb/rbac_role.go b/go-controller/pkg/sbdb/rbac_role.go index ce8798645c..427582d3b8 100644 --- a/go-controller/pkg/sbdb/rbac_role.go +++ b/go-controller/pkg/sbdb/rbac_role.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const RBACRoleTable = "RBAC_Role" diff --git a/go-controller/pkg/sbdb/sb_global.go b/go-controller/pkg/sbdb/sb_global.go index 2374478db7..667fdae3e0 100644 --- a/go-controller/pkg/sbdb/sb_global.go +++ b/go-controller/pkg/sbdb/sb_global.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SBGlobalTable = "SB_Global" diff --git a/go-controller/pkg/sbdb/service_monitor.go b/go-controller/pkg/sbdb/service_monitor.go index d3e1188680..189f09f659 100644 --- a/go-controller/pkg/sbdb/service_monitor.go +++ b/go-controller/pkg/sbdb/service_monitor.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ServiceMonitorTable = "Service_Monitor" diff --git a/go-controller/pkg/sbdb/ssl.go b/go-controller/pkg/sbdb/ssl.go index 3fab5fd1e9..eccda6dff3 100644 --- a/go-controller/pkg/sbdb/ssl.go 
+++ b/go-controller/pkg/sbdb/ssl.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SSLTable = "SSL" @@ -16,6 +16,7 @@ type SSL struct { ExternalIDs map[string]string `ovsdb:"external_ids"` PrivateKey string `ovsdb:"private_key"` SSLCiphers string `ovsdb:"ssl_ciphers"` + SSLCiphersuites string `ovsdb:"ssl_ciphersuites"` SSLProtocols string `ovsdb:"ssl_protocols"` } @@ -73,6 +74,10 @@ func (a *SSL) GetSSLCiphers() string { return a.SSLCiphers } +func (a *SSL) GetSSLCiphersuites() string { + return a.SSLCiphersuites +} + func (a *SSL) GetSSLProtocols() string { return a.SSLProtocols } @@ -105,6 +110,7 @@ func (a *SSL) Equals(b *SSL) bool { equalSSLExternalIDs(a.ExternalIDs, b.ExternalIDs) && a.PrivateKey == b.PrivateKey && a.SSLCiphers == b.SSLCiphers && + a.SSLCiphersuites == b.SSLCiphersuites && a.SSLProtocols == b.SSLProtocols } diff --git a/go-controller/pkg/sbdb/static_mac_binding.go b/go-controller/pkg/sbdb/static_mac_binding.go index 370968f604..8a3c590e31 100644 --- a/go-controller/pkg/sbdb/static_mac_binding.go +++ b/go-controller/pkg/sbdb/static_mac_binding.go @@ -3,7 +3,7 @@ package sbdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const StaticMACBindingTable = "Static_MAC_Binding" diff --git a/go-controller/pkg/testing/libovsdb/libovsdb.go b/go-controller/pkg/testing/libovsdb/libovsdb.go index 8f10bab356..a6836811d1 100644 --- a/go-controller/pkg/testing/libovsdb/libovsdb.go +++ b/go-controller/pkg/testing/libovsdb/libovsdb.go @@ -21,14 +21,14 @@ import ( "k8s.io/apimachinery/pkg/util/wait" - libovsdbclient "github.com/ovn-org/libovsdb/client" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/database/inmemory" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/ovsdb/serverdb" - 
"github.com/ovn-org/libovsdb/server" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/database/inmemory" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb" + "github.com/ovn-kubernetes/libovsdb/server" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/cryptorand" diff --git a/go-controller/pkg/testing/libovsdb/matchers.go b/go-controller/pkg/testing/libovsdb/matchers.go index 102d8fbc63..1ff3977065 100644 --- a/go-controller/pkg/testing/libovsdb/matchers.go +++ b/go-controller/pkg/testing/libovsdb/matchers.go @@ -9,7 +9,7 @@ import ( gomegaformat "github.com/onsi/gomega/format" gomegatypes "github.com/onsi/gomega/types" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" ) // isSetEqual compares a slice as an unordered set diff --git a/go-controller/pkg/testing/libovsdb/ops.go b/go-controller/pkg/testing/libovsdb/ops.go index 1926bbc3f5..de73c1d154 100644 --- a/go-controller/pkg/testing/libovsdb/ops.go +++ b/go-controller/pkg/testing/libovsdb/ops.go @@ -6,7 +6,7 @@ import ( "fmt" "hash/fnv" - libovsdbclient "github.com/ovn-org/libovsdb/client" + libovsdbclient "github.com/ovn-kubernetes/libovsdb/client" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/sbdb" ) diff --git a/go-controller/pkg/types/const.go b/go-controller/pkg/types/const.go index b06e337efd..523da8e27b 100644 --- a/go-controller/pkg/types/const.go +++ b/go-controller/pkg/types/const.go @@ -74,7 +74,7 @@ const ( TransitSwitchToRouterPrefix = "tstor-" RouterToTransitSwitchPrefix = "rtots-" - // ACL Default Tier Priorities + // DefaultACLTier Priorities // Default routed multicast allow acl rule priority DefaultRoutedMcastAllowPriority = 1013 
@@ -91,7 +91,8 @@ const ( // Deny priority for isolated advertised networks AdvertisedNetworkDenyPriority = 1050 - // ACL PlaceHolderACL Tier Priorities + // PrimaryACLTier Priorities + PrimaryUDNAllowPriority = 1001 // Default deny acl rule priority PrimaryUDNDenyPriority = 1000 @@ -99,8 +100,6 @@ const ( // ACL Tiers // Tier 0 is called Primary as it is evaluated before any other feature-related Tiers. // Currently used for User Defined Network Feature. - // NOTE: When we upgrade from an OVN version without tiers to the new version with - // tiers, all values in the new ACL.Tier column will be set to 0. PrimaryACLTier = 0 // Default Tier for all ACLs DefaultACLTier = 2 @@ -188,6 +187,9 @@ const ( NodeModeDPU = "dpu" NodeModeDPUHost = "dpu-host" + // Gateway interface configuration + DeriveFromMgmtPort = "derive-from-mgmt-port" + // Geneve header length for IPv4 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823) GeneveHeaderLengthIPv4 = 58 // Geneve header length for IPv6 (https://github.com/openshift/cluster-network-operator/pull/720#issuecomment-664020823) @@ -310,11 +312,30 @@ const ( // CUDNPrefix of all CUDN network names CUDNPrefix = "cluster_udn_" - // NFTNoPMTUDRemoteNodeIPsv4 is a set used to track remote node IPs that do not belong to + // NFTRemoteNodeIPsv4 is a set used to track remote node v4IPs that do not belong to // the local node's subnet. - NFTNoPMTUDRemoteNodeIPsv4 = "no-pmtud-remote-node-ips-v4" + NFTRemoteNodeIPsv4 = "remote-node-ips-v4" - // NFTNoPMTUDRemoteNodeIPsv6 is a set used to track remote node IPs that do not belong to + // NFTRemoteNodeIPsv6 is a set used to track remote node v6IPs that do not belong to // the local node's subnet. 
- NFTNoPMTUDRemoteNodeIPsv6 = "no-pmtud-remote-node-ips-v6" + NFTRemoteNodeIPsv6 = "remote-node-ips-v6" + + // Metrics + MetricOvnkubeNamespace = "ovnkube" + MetricOvnkubeSubsystemController = "controller" + MetricOvnkubeSubsystemClusterManager = "clustermanager" + MetricOvnkubeSubsystemNode = "node" + MetricOvnNamespace = "ovn" + MetricOvnSubsystemDB = "db" + MetricOvnSubsystemNorthd = "northd" + MetricOvnSubsystemController = "controller" + MetricOvsNamespace = "ovs" + MetricOvsSubsystemVswitchd = "vswitchd" + MetricOvsSubsystemDB = "db" + + // "mgmtport-no-snat-subnets-v4" and "mgmtport-no-snat-subnets-v6" are sets containing + // subnets, indicating traffic that should not be SNATted when passing through the + // management port. + NFTMgmtPortNoSNATSubnetsV4 = "mgmtport-no-snat-subnets-v4" + NFTMgmtPortNoSNATSubnetsV6 = "mgmtport-no-snat-subnets-v6" ) diff --git a/go-controller/pkg/util/multi_network.go b/go-controller/pkg/util/multi_network.go index b1679462f3..bf1d3d6993 100644 --- a/go-controller/pkg/util/multi_network.go +++ b/go-controller/pkg/util/multi_network.go @@ -82,7 +82,6 @@ type NetInfo interface { GetNetworkScopedExtPortName(bridgeID, nodeName string) string GetNetworkScopedLoadBalancerName(lbName string) string GetNetworkScopedLoadBalancerGroupName(lbGroupName string) string - GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string // GetNetInfo is an identity method used to get the specific NetInfo // implementation @@ -543,10 +542,6 @@ func (nInfo *DefaultNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName s return nInfo.GetNetworkScopedName(lbGroupName) } -func (nInfo *DefaultNetInfo) GetNetworkScopedClusterSubnetSNATMatch(_ string) string { - return "" -} - func (nInfo *DefaultNetInfo) canReconcile(netInfo NetInfo) bool { _, ok := netInfo.(*DefaultNetInfo) return ok @@ -709,7 +704,7 @@ func (nInfo *secondaryNetInfo) GetNetworkScopedGWRouterName(nodeName string) str func (nInfo *secondaryNetInfo) 
GetNetworkScopedSwitchName(nodeName string) string { // In Layer2Topology there is just one global switch if nInfo.TopologyType() == types.Layer2Topology { - return fmt.Sprintf("%s%s", nInfo.getPrefix(), types.OVNLayer2Switch) + return nInfo.GetNetworkScopedName(types.OVNLayer2Switch) } return nInfo.GetNetworkScopedName(nodeName) } @@ -738,13 +733,6 @@ func (nInfo *secondaryNetInfo) GetNetworkScopedLoadBalancerGroupName(lbGroupName return nInfo.GetNetworkScopedName(lbGroupName) } -func (nInfo *secondaryNetInfo) GetNetworkScopedClusterSubnetSNATMatch(nodeName string) string { - if nInfo.TopologyType() != types.Layer2Topology { - return "" - } - return fmt.Sprintf("outport == %q", types.GWRouterToExtSwitchPrefix+nInfo.GetNetworkScopedGWRouterName(nodeName)) -} - // getPrefix returns if the logical entities prefix for this network func (nInfo *secondaryNetInfo) getPrefix() string { return GetSecondaryNetworkPrefix(nInfo.netName) @@ -822,6 +810,11 @@ func (nInfo *secondaryNetInfo) canReconcile(other NetInfo) bool { if nInfo == nil && other == nil { return true } + // if network ID has changed, it means the network was re-created, and all controllers + // should execute delete+create instead of update + if nInfo.GetNetworkID() != types.InvalidID && other.GetNetworkID() != types.InvalidID && nInfo.GetNetworkID() != other.GetNetworkID() { + return false + } if nInfo.netName != other.GetNetworkName() { return false } @@ -1151,6 +1144,9 @@ func ParseNADInfo(nad *nettypes.NetworkAttachmentDefinition) (NetInfo, error) { func ParseNetConf(netattachdef *nettypes.NetworkAttachmentDefinition) (*ovncnitypes.NetConf, error) { netconf, err := config.ParseNetConf([]byte(netattachdef.Spec.Config)) if err != nil { + if err.Error() == ErrorAttachDefNotOvnManaged.Error() { + return nil, err + } return nil, fmt.Errorf("error parsing Network Attachment Definition %s/%s: %v", netattachdef.Namespace, netattachdef.Name, err) } @@ -1370,6 +1366,12 @@ func IsRouteAdvertisementsEnabled() bool { 
return config.OVNKubernetesFeature.EnableMultiNetwork && config.OVNKubernetesFeature.EnableRouteAdvertisements } +// IsPreconfiguredUDNAddressesEnabled indicates if user defined IPs / MAC +// addresses can be set in primary UDNs +func IsPreconfiguredUDNAddressesEnabled() bool { + return IsNetworkSegmentationSupportEnabled() && config.OVNKubernetesFeature.EnablePreconfiguredUDNAddresses +} + func DoesNetworkRequireIPAM(netInfo NetInfo) bool { return !((netInfo.TopologyType() == types.Layer2Topology || netInfo.TopologyType() == types.LocalnetTopology) && len(netInfo.Subnets()) == 0) } diff --git a/go-controller/pkg/util/multi_network_test.go b/go-controller/pkg/util/multi_network_test.go index 56f18d058a..daaaf920a5 100644 --- a/go-controller/pkg/util/multi_network_test.go +++ b/go-controller/pkg/util/multi_network_test.go @@ -180,7 +180,7 @@ func TestParseNetconf(t *testing.T) { "netAttachDefName": "default/tenantred" } `, - expectedError: fmt.Errorf("error parsing Network Attachment Definition ns1/nad1: net-attach-def not managed by OVN"), + expectedError: fmt.Errorf("net-attach-def not managed by OVN"), }, { desc: "attachment definition with IPAM key defined, using a wrong type", @@ -1154,6 +1154,16 @@ func TestSubnetOverlapCheck(t *testing.T) { } `, }, + { + desc: "return error when the network is not ovnk", + inputNetAttachDefConfigSpec: ` + { + "name": "test", + "type": "sriov-cni" + } + `, + expectedError: ErrorAttachDefNotOvnManaged, + }, } for _, test := range tests { diff --git a/go-controller/pkg/util/nad.go b/go-controller/pkg/util/nad.go new file mode 100644 index 0000000000..3a220e2b82 --- /dev/null +++ b/go-controller/pkg/util/nad.go @@ -0,0 +1,46 @@ +package util + +import ( + "context" + "fmt" + + nadtypes "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + nadclientset "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" + nadlisters 
"github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/listers/k8s.cni.cncf.io/v1" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" +) + +// EnsureDefaultNetworkNAD ensures that a well-known NAD exists for the +// default network in ovn-k namespace. This will allow the users to customize +// the primary UDN attachments with static IPs, and/or MAC address requests, by +// using the multus-cni `default network` feature. +func EnsureDefaultNetworkNAD(nadLister nadlisters.NetworkAttachmentDefinitionLister, nadClient nadclientset.Interface) (*nadtypes.NetworkAttachmentDefinition, error) { + nad, err := nadLister.NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Get(types.DefaultNetworkName) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + if nad != nil { + return nad, nil + } + return nadClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(config.Kubernetes.OVNConfigNamespace).Create( + context.Background(), + &nadtypes.NetworkAttachmentDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: types.DefaultNetworkName, + Namespace: config.Kubernetes.OVNConfigNamespace, + }, + Spec: nadtypes.NetworkAttachmentDefinitionSpec{ + Config: fmt.Sprintf("{\"cniVersion\": \"0.4.0\", \"name\": \"ovn-kubernetes\", \"type\": \"%s\"}", config.CNI.Plugin), + }, + }, + // note we don't set ourselves as field manager for this create as we + // want to process the resulting event that would otherwise be filtered + // out in nadNeedsUpdate + metav1.CreateOptions{}, + ) +} diff --git a/go-controller/pkg/util/node_annotations.go b/go-controller/pkg/util/node_annotations.go index d3be36f2db..cddd754d60 100644 --- a/go-controller/pkg/util/node_annotations.go +++ b/go-controller/pkg/util/node_annotations.go @@ -97,6 +97,9 @@ const ( // OVNNodeHostCIDRs is used 
to track the different host IP addresses and subnet masks on the node OVNNodeHostCIDRs = "k8s.ovn.org/host-cidrs" + // OVNNodePrimaryDPUHostAddr is used to track the primary DPU host address on the node + OVNNodePrimaryDPUHostAddr = "k8s.ovn.org/primary-dpu-host-addr" + // OVNNodeSecondaryHostEgressIPs contains EgressIP addresses that aren't managed by OVN. The EIP addresses are assigned to // standard linux interfaces and not interfaces of type OVS. OVNNodeSecondaryHostEgressIPs = "k8s.ovn.org/secondary-host-egress-ips" @@ -154,6 +157,9 @@ const ( // ovnNodeEncapIPs is used to indicate encap IPs set on the node OVNNodeEncapIPs = "k8s.ovn.org/node-encap-ips" + + // OvnNodeDontSNATSubnets is a user assigned source subnets that should avoid SNAT at ovn-k8s-mp0 interface + OvnNodeDontSNATSubnets = "k8s.ovn.org/node-ingress-snat-exclude-subnets" ) type L3GatewayConfig struct { @@ -1115,15 +1121,45 @@ func ParseNodeHostCIDRsExcludeOVNNetworks(node *corev1.Node) ([]string, error) { } func ParseNodeHostCIDRsList(node *corev1.Node) ([]string, error) { - addrAnnotation, ok := node.Annotations[OVNNodeHostCIDRs] + return parseNodeAnnotationList(node, OVNNodeHostCIDRs) +} + +func ParseNodeDontSNATSubnetsList(node *corev1.Node) ([]string, error) { + return parseNodeAnnotationList(node, OvnNodeDontSNATSubnets) +} + +// NodeDontSNATSubnetAnnotationChanged returns true if the OvnNodeDontSNATSubnets in the corev1.Nodes doesn't match +func NodeDontSNATSubnetAnnotationChanged(oldNode, newNode *corev1.Node) bool { + oldVal, oldOk := oldNode.Annotations[OvnNodeDontSNATSubnets] + newVal, newOk := newNode.Annotations[OvnNodeDontSNATSubnets] + + if oldOk != newOk { + return true + } + + if oldOk && newOk && oldVal != newVal { + return true + } + + return false +} + +// NodeDontSNATSubnetAnnotationExist returns true OvnNodeDontSNATSubnets annotation key exists in node annotation +func NodeDontSNATSubnetAnnotationExist(node *corev1.Node) bool { + _, ok := 
node.Annotations[OvnNodeDontSNATSubnets] + return ok +} + +func parseNodeAnnotationList(node *corev1.Node, annotationKey string) ([]string, error) { + annotationValue, ok := node.Annotations[annotationKey] if !ok { - return nil, newAnnotationNotSetError("%s annotation not found for node %q", OVNNodeHostCIDRs, node.Name) + return []string{}, nil } var cfg []string - if err := json.Unmarshal([]byte(addrAnnotation), &cfg); err != nil { - return nil, fmt.Errorf("failed to unmarshal host cidrs annotation %s for node %q: %v", - addrAnnotation, node.Name, err) + if err := json.Unmarshal([]byte(annotationValue), &cfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal %s annotation %s for node %q: %v", + annotationKey, annotationValue, node.Name, err) } return cfg, nil } @@ -1501,3 +1537,39 @@ func ParseNodeEncapIPsAnnotation(node *corev1.Node) ([]string, error) { func NodeEncapIPsChanged(oldNode, newNode *corev1.Node) bool { return oldNode.Annotations[OVNNodeEncapIPs] != newNode.Annotations[OVNNodeEncapIPs] } + +// SetNodePrimaryDPUHostAddr sets the primary DPU host address annotation on a node +func SetNodePrimaryDPUHostAddr(nodeAnnotator kube.Annotator, ifAddrs []*net.IPNet) error { + nodeIPNetv4, _ := MatchFirstIPNetFamily(false, ifAddrs) + nodeIPNetv6, _ := MatchFirstIPNetFamily(true, ifAddrs) + + ifAddrAnnotation := ifAddr{} + if nodeIPNetv4 != nil { + ifAddrAnnotation.IPv4 = nodeIPNetv4.String() + } + if nodeIPNetv6 != nil { + ifAddrAnnotation.IPv6 = nodeIPNetv6.String() + } + return nodeAnnotator.Set(OVNNodePrimaryDPUHostAddr, ifAddrAnnotation) +} + +// NodePrimaryDPUHostAddrAnnotationChanged returns true if the primary DPU host address annotation changed +func NodePrimaryDPUHostAddrAnnotationChanged(oldNode, newNode *corev1.Node) bool { + return oldNode.Annotations[OVNNodePrimaryDPUHostAddr] != newNode.Annotations[OVNNodePrimaryDPUHostAddr] +} + +// GetNodePrimaryDPUHostAddrAnnotation returns the raw primary DPU host address annotation from a node +func 
GetNodePrimaryDPUHostAddrAnnotation(node *corev1.Node) (*ifAddr, error) { + addrAnnotation, ok := node.Annotations[OVNNodePrimaryDPUHostAddr] + if !ok { + return nil, newAnnotationNotSetError("%s annotation not found for node %q", OVNNodePrimaryDPUHostAddr, node.Name) + } + nodeIfAddr := &ifAddr{} + if err := json.Unmarshal([]byte(addrAnnotation), nodeIfAddr); err != nil { + return nil, fmt.Errorf("failed to unmarshal annotation: %s for node %q, err: %v", OVNNodePrimaryDPUHostAddr, node.Name, err) + } + if nodeIfAddr.IPv4 == "" && nodeIfAddr.IPv6 == "" { + return nil, fmt.Errorf("node: %q does not have any IP information set", node.Name) + } + return nodeIfAddr, nil +} diff --git a/go-controller/pkg/util/node_annotations_unit_test.go b/go-controller/pkg/util/node_annotations_unit_test.go index 463a83f31b..f987684e82 100644 --- a/go-controller/pkg/util/node_annotations_unit_test.go +++ b/go-controller/pkg/util/node_annotations_unit_test.go @@ -829,3 +829,174 @@ func TestParseUDNLayer2NodeGRLRPTunnelIDs(t *testing.T) { }) } } + +func TestNodeDontSNATSubnetAnnotationChanged(t *testing.T) { + tests := []struct { + desc string + oldNode *corev1.Node + newNode *corev1.Node + result bool + }{ + { + desc: "annotation added", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + result: true, + }, + { + desc: "annotation removed", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + result: true, + }, + { + desc: "annotation value changed", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: 
`["192.168.1.0/24"]`, + }, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["10.0.0.0/16"]`, + }, + }, + }, + result: true, + }, + { + desc: "false: annotation unchanged", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24"]`, + }, + }, + }, + result: false, + }, + { + desc: "annotation absent in both", + oldNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + newNode: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + result: false, + }, + } + + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + result := NodeDontSNATSubnetAnnotationChanged(tc.oldNode, tc.newNode) + assert.Equal(t, tc.result, result) + }) + } +} + +func TestParseNodeDontSNATSubnetsList(t *testing.T) { + tests := []struct { + desc string + node *corev1.Node + expected []string + expectError bool + }{ + { + desc: "no annotation present", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-node", + Annotations: map[string]string{}, + }, + }, + expected: []string{}, + expectError: false, + }, + { + desc: "valid annotation list with IPv4 and IPv6 CIDRs", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `["192.168.1.0/24", "fd00::/64", "10.0.0.0/16"]`, + }, + }, + }, + expected: []string{"192.168.1.0/24", "fd00::/64", "10.0.0.0/16"}, + expectError: false, + }, + { + desc: "invalid annotation value (not JSON)", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node3", + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `not-a-json`, + }, + 
}, + }, + expected: nil, + expectError: true, + }, + { + desc: "empty JSON array annotation", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node4", + Annotations: map[string]string{ + OvnNodeDontSNATSubnets: `[]`, + }, + }, + }, + expected: []string{}, + expectError: false, + }, + } + + for i, tc := range tests { + t.Run(fmt.Sprintf("%d:%s", i, tc.desc), func(t *testing.T) { + result, err := ParseNodeDontSNATSubnetsList(tc.node) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expected, result) + } + }) + } +} diff --git a/go-controller/pkg/util/ovs.go b/go-controller/pkg/util/ovs.go index ff21e828db..f6e3ca0ad0 100644 --- a/go-controller/pkg/util/ovs.go +++ b/go-controller/pkg/util/ovs.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "net" "regexp" "runtime" "strings" @@ -377,7 +378,7 @@ func RunOVNAppctlWithTimeout(timeout int, args ...string) (string, string, error // Run the ovn-ctl command and retry if "Connection refused" // poll waitng for service to become available -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func runOVNretry(cmdPath string, envVars []string, args ...string) (*bytes.Buffer, *bytes.Buffer, error) { retriesLeft := ovnCmdRetryCount @@ -434,14 +435,14 @@ func getNbOVSDBArgs(command string, args ...string) []string { } // RunOVNNbctlWithTimeout runs command via ovn-nbctl with a specific timeout -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNNbctlWithTimeout(timeout int, args ...string) (string, string, error) { stdout, stderr, err := RunOVNNbctlRawOutput(timeout, args...) 
return strings.Trim(strings.TrimSpace(stdout), "\""), stderr, err } // RunOVNNbctlRawOutput returns the output with no trimming or other string manipulation -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNNbctlRawOutput(timeout int, args ...string) (string, string, error) { cmdArgs, envVars := getNbctlArgsAndEnv(timeout, args...) stdout, stderr, err := runOVNretry(runner.nbctlPath, envVars, cmdArgs...) @@ -449,13 +450,13 @@ func RunOVNNbctlRawOutput(timeout int, args ...string) (string, string, error) { } // RunOVNNbctl runs a command via ovn-nbctl. -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNNbctl(args ...string) (string, string, error) { return RunOVNNbctlWithTimeout(ovsCommandTimeout, args...) } // RunOVNSbctlWithTimeout runs command via ovn-sbctl with a specific timeout -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNSbctlWithTimeout(timeout int, args ...string) (string, string, error) { var cmdArgs []string @@ -499,7 +500,7 @@ func RunOVSDBClientOVNNB(command string, args ...string) (string, string, error) } // RunOVNSbctl runs a command via ovn-sbctl. -// FIXME: Remove when https://github.com/ovn-org/libovsdb/issues/235 is fixed +// FIXME: Remove when https://github.com/ovn-kubernetes/libovsdb/issues/235 is fixed func RunOVNSbctl(args ...string) (string, string, error) { return RunOVNSbctlWithTimeout(ovsCommandTimeout, args...) 
} @@ -819,6 +820,18 @@ func DetectCheckPktLengthSupport(bridge string) (bool, error) { return false, nil } +// SetStaticFDBEntry programs a static MAC entry into the OVS FIB and disables MAC learning for this entry +func SetStaticFDBEntry(bridge, port string, mac net.HardwareAddr) error { + // Assume default VLAN for local port + vlan := "0" + stdout, stderr, err := RunOVSAppctl("fdb/add", bridge, port, vlan, mac.String()) + if err != nil { + return fmt.Errorf("failed to add FDB entry to OVS for LOCAL port, "+ + "stdout: %q, stderr: %q, error: %v", stdout, stderr, err) + } + return nil +} + // IsOvsHwOffloadEnabled checks if OvS Hardware Offload is enabled. func IsOvsHwOffloadEnabled() (bool, error) { stdout, stderr, err := RunOVSVsctl("--if-exists", "get", diff --git a/go-controller/pkg/vswitchd/autoattach.go b/go-controller/pkg/vswitchd/autoattach.go index b9655736aa..e54dbba3ae 100644 --- a/go-controller/pkg/vswitchd/autoattach.go +++ b/go-controller/pkg/vswitchd/autoattach.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const AutoAttachTable = "AutoAttach" diff --git a/go-controller/pkg/vswitchd/bridge.go b/go-controller/pkg/vswitchd/bridge.go index 8953faa3f2..14997f995b 100644 --- a/go-controller/pkg/vswitchd/bridge.go +++ b/go-controller/pkg/vswitchd/bridge.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const BridgeTable = "Bridge" diff --git a/go-controller/pkg/vswitchd/controller.go b/go-controller/pkg/vswitchd/controller.go index 1b38c989bf..ff02062eaa 100644 --- a/go-controller/pkg/vswitchd/controller.go +++ b/go-controller/pkg/vswitchd/controller.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ControllerTable = "Controller" diff --git a/go-controller/pkg/vswitchd/ct_timeout_policy.go 
b/go-controller/pkg/vswitchd/ct_timeout_policy.go index 98bf690498..150db9b2f7 100644 --- a/go-controller/pkg/vswitchd/ct_timeout_policy.go +++ b/go-controller/pkg/vswitchd/ct_timeout_policy.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const CTTimeoutPolicyTable = "CT_Timeout_Policy" diff --git a/go-controller/pkg/vswitchd/ct_zone.go b/go-controller/pkg/vswitchd/ct_zone.go index 4eaba845c4..6868191974 100644 --- a/go-controller/pkg/vswitchd/ct_zone.go +++ b/go-controller/pkg/vswitchd/ct_zone.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const CTZoneTable = "CT_Zone" diff --git a/go-controller/pkg/vswitchd/datapath.go b/go-controller/pkg/vswitchd/datapath.go index 71a995f93e..899f5d3531 100644 --- a/go-controller/pkg/vswitchd/datapath.go +++ b/go-controller/pkg/vswitchd/datapath.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DatapathTable = "Datapath" diff --git a/go-controller/pkg/vswitchd/flow_sample_collector_set.go b/go-controller/pkg/vswitchd/flow_sample_collector_set.go index 2c90f5d438..8c975711a5 100644 --- a/go-controller/pkg/vswitchd/flow_sample_collector_set.go +++ b/go-controller/pkg/vswitchd/flow_sample_collector_set.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FlowSampleCollectorSetTable = "Flow_Sample_Collector_Set" diff --git a/go-controller/pkg/vswitchd/flow_table.go b/go-controller/pkg/vswitchd/flow_table.go index 42d49d2f58..911b6fbb1d 100644 --- a/go-controller/pkg/vswitchd/flow_table.go +++ b/go-controller/pkg/vswitchd/flow_table.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const FlowTableTable = "Flow_Table" diff 
--git a/go-controller/pkg/vswitchd/interface.go b/go-controller/pkg/vswitchd/interface.go index e6f67ba9c7..6f89cc5d1a 100644 --- a/go-controller/pkg/vswitchd/interface.go +++ b/go-controller/pkg/vswitchd/interface.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const InterfaceTable = "Interface" diff --git a/go-controller/pkg/vswitchd/ipfix.go b/go-controller/pkg/vswitchd/ipfix.go index 72b5d3915c..8ea91c8fd1 100644 --- a/go-controller/pkg/vswitchd/ipfix.go +++ b/go-controller/pkg/vswitchd/ipfix.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const IPFIXTable = "IPFIX" diff --git a/go-controller/pkg/vswitchd/manager.go b/go-controller/pkg/vswitchd/manager.go index ff1df96caa..45a9dcb609 100644 --- a/go-controller/pkg/vswitchd/manager.go +++ b/go-controller/pkg/vswitchd/manager.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const ManagerTable = "Manager" diff --git a/go-controller/pkg/vswitchd/mirror.go b/go-controller/pkg/vswitchd/mirror.go index 044455d253..2bab171097 100644 --- a/go-controller/pkg/vswitchd/mirror.go +++ b/go-controller/pkg/vswitchd/mirror.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const MirrorTable = "Mirror" diff --git a/go-controller/pkg/vswitchd/model.go b/go-controller/pkg/vswitchd/model.go index c862f04277..20b8d0cc94 100644 --- a/go-controller/pkg/vswitchd/model.go +++ b/go-controller/pkg/vswitchd/model.go @@ -6,8 +6,8 @@ package vswitchd import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb diff 
--git a/go-controller/pkg/vswitchd/netflow.go b/go-controller/pkg/vswitchd/netflow.go index f958587044..d1f05029fd 100644 --- a/go-controller/pkg/vswitchd/netflow.go +++ b/go-controller/pkg/vswitchd/netflow.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const NetFlowTable = "NetFlow" diff --git a/go-controller/pkg/vswitchd/open_vswitch.go b/go-controller/pkg/vswitchd/open_vswitch.go index e8ea481d5b..e8a1456fe9 100644 --- a/go-controller/pkg/vswitchd/open_vswitch.go +++ b/go-controller/pkg/vswitchd/open_vswitch.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const OpenvSwitchTable = "Open_vSwitch" diff --git a/go-controller/pkg/vswitchd/port.go b/go-controller/pkg/vswitchd/port.go index cf0ba96153..6aa3350c93 100644 --- a/go-controller/pkg/vswitchd/port.go +++ b/go-controller/pkg/vswitchd/port.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const PortTable = "Port" diff --git a/go-controller/pkg/vswitchd/qos.go b/go-controller/pkg/vswitchd/qos.go index aa1c9dd004..0ac14541d9 100644 --- a/go-controller/pkg/vswitchd/qos.go +++ b/go-controller/pkg/vswitchd/qos.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const QoSTable = "QoS" diff --git a/go-controller/pkg/vswitchd/queue.go b/go-controller/pkg/vswitchd/queue.go index e8615e9cf7..60094eb8c2 100644 --- a/go-controller/pkg/vswitchd/queue.go +++ b/go-controller/pkg/vswitchd/queue.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const QueueTable = "Queue" diff --git a/go-controller/pkg/vswitchd/sflow.go b/go-controller/pkg/vswitchd/sflow.go index fcbcc8569e..58841d7877 100644 --- 
a/go-controller/pkg/vswitchd/sflow.go +++ b/go-controller/pkg/vswitchd/sflow.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SFlowTable = "sFlow" diff --git a/go-controller/pkg/vswitchd/ssl.go b/go-controller/pkg/vswitchd/ssl.go index 79c4b1bad4..84dfbd1f33 100644 --- a/go-controller/pkg/vswitchd/ssl.go +++ b/go-controller/pkg/vswitchd/ssl.go @@ -3,7 +3,7 @@ package vswitchd -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const SSLTable = "SSL" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go index f374a5c511..6f4518f097 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ limitations under the License. 
package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" k8sv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1" discovery "k8s.io/client-go/discovery" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go index a67d14acb8..eb8da4c265 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,8 +31,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). 
func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go index 44e8061b76..64c6b6be35 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go index 3cdc1ac5b1..e6f64d71b9 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go index 743391c14b..8514bb55f2 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go index d6a1737fdb..522a30ca3e 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go index faa8377ce2..19ad6aefe7 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go index b38fd4c55d..33fd99c15d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go index 00db990cf9..e410e0b7e3 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,123 +19,32 @@ limitations under the License. 
package fake import ( - "context" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1" + gentype "k8s.io/client-go/gentype" ) -// FakeIPAMClaims implements IPAMClaimInterface -type FakeIPAMClaims struct { +// fakeIPAMClaims implements IPAMClaimInterface +type fakeIPAMClaims struct { + *gentype.FakeClientWithList[*v1alpha1.IPAMClaim, *v1alpha1.IPAMClaimList] Fake *FakeK8sV1alpha1 - ns string -} - -var ipamclaimsResource = v1alpha1.SchemeGroupVersion.WithResource("ipamclaims") - -var ipamclaimsKind = v1alpha1.SchemeGroupVersion.WithKind("IPAMClaim") - -// Get takes name of the iPAMClaim, and returns the corresponding iPAMClaim object, and an error if there is any. -func (c *FakeIPAMClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(ipamclaimsResource, c.ns, name), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// List takes label and field selectors, and returns the list of IPAMClaims that match those selectors. -func (c *FakeIPAMClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAMClaimList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(ipamclaimsResource, ipamclaimsKind, c.ns, opts), &v1alpha1.IPAMClaimList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.IPAMClaimList{ListMeta: obj.(*v1alpha1.IPAMClaimList).ListMeta} - for _, item := range obj.(*v1alpha1.IPAMClaimList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested iPAMClaims. -func (c *FakeIPAMClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(ipamclaimsResource, c.ns, opts)) - -} - -// Create takes the representation of a iPAMClaim and creates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *FakeIPAMClaims) Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(ipamclaimsResource, c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// Update takes the representation of a iPAMClaim and updates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *FakeIPAMClaims) Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(ipamclaimsResource, c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
-func (c *FakeIPAMClaims) UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(ipamclaimsResource, "status", c.ns, iPAMClaim), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.IPAMClaim), err -} - -// Delete takes name of the iPAMClaim and deletes it. Returns an error if one occurs. -func (c *FakeIPAMClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(ipamclaimsResource, c.ns, name, opts), &v1alpha1.IPAMClaim{}) - - return err } -// DeleteCollection deletes a collection of objects. -func (c *FakeIPAMClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(ipamclaimsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.IPAMClaimList{}) - return err -} - -// Patch applies the patch and returns the patched iPAMClaim. -func (c *FakeIPAMClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(ipamclaimsResource, c.ns, name, pt, data, subresources...), &v1alpha1.IPAMClaim{}) - - if obj == nil { - return nil, err +func newFakeIPAMClaims(fake *FakeK8sV1alpha1, namespace string) ipamclaimsv1alpha1.IPAMClaimInterface { + return &fakeIPAMClaims{ + gentype.NewFakeClientWithList[*v1alpha1.IPAMClaim, *v1alpha1.IPAMClaimList]( + fake.Fake, + namespace, + v1alpha1.SchemeGroupVersion.WithResource("ipamclaims"), + v1alpha1.SchemeGroupVersion.WithKind("IPAMClaim"), + func() *v1alpha1.IPAMClaim { return &v1alpha1.IPAMClaim{} }, + func() *v1alpha1.IPAMClaimList { return &v1alpha1.IPAMClaimList{} }, + func(dst, src *v1alpha1.IPAMClaimList) { dst.ListMeta = src.ListMeta }, + func(list *v1alpha1.IPAMClaimList) []*v1alpha1.IPAMClaim { return gentype.ToPointerSlice(list.Items) }, + func(list *v1alpha1.IPAMClaimList, items []*v1alpha1.IPAMClaim) { + list.Items = gentype.FromPointerSlice(items) + }, + ), + fake, } - return obj.(*v1alpha1.IPAMClaim), err } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go index adc0c545ed..65c4b4c979 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/fake/fake_ipamclaims_client.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -29,7 +29,7 @@ type FakeK8sV1alpha1 struct { } func (c *FakeK8sV1alpha1) IPAMClaims(namespace string) v1alpha1.IPAMClaimInterface { - return &FakeIPAMClaims{c, namespace} + return newFakeIPAMClaims(c, namespace) } // RESTClient returns a RESTClient that is used to communicate diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go index c5c3006e82..b70abd3102 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go index bfc26c0c5a..f4d088c1b9 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,15 +19,14 @@ limitations under the License. package v1alpha1 import ( - "context" - "time" + context "context" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" scheme "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // IPAMClaimsGetter has a method to return a IPAMClaimInterface. @@ -38,158 +37,34 @@ type IPAMClaimsGetter interface { // IPAMClaimInterface has methods to work with IPAMClaim resources. 
type IPAMClaimInterface interface { - Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (*v1alpha1.IPAMClaim, error) - Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) - UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (*v1alpha1.IPAMClaim, error) + Create(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.CreateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + Update(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.UpdateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, iPAMClaim *ipamclaimsv1alpha1.IPAMClaim, opts v1.UpdateOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.IPAMClaim, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.IPAMClaimList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ipamclaimsv1alpha1.IPAMClaim, error) + List(ctx context.Context, opts v1.ListOptions) (*ipamclaimsv1alpha1.IPAMClaimList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ipamclaimsv1alpha1.IPAMClaim, err error) IPAMClaimExpansion } // iPAMClaims implements IPAMClaimInterface type iPAMClaims struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ipamclaimsv1alpha1.IPAMClaim, 
*ipamclaimsv1alpha1.IPAMClaimList] } // newIPAMClaims returns a IPAMClaims func newIPAMClaims(c *K8sV1alpha1Client, namespace string) *iPAMClaims { return &iPAMClaims{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ipamclaimsv1alpha1.IPAMClaim, *ipamclaimsv1alpha1.IPAMClaimList]( + "ipamclaims", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ipamclaimsv1alpha1.IPAMClaim { return &ipamclaimsv1alpha1.IPAMClaim{} }, + func() *ipamclaimsv1alpha1.IPAMClaimList { return &ipamclaimsv1alpha1.IPAMClaimList{} }, + ), } } - -// Get takes name of the iPAMClaim, and returns the corresponding iPAMClaim object, and an error if there is any. -func (c *iPAMClaims) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of IPAMClaims that match those selectors. -func (c *iPAMClaims) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.IPAMClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.IPAMClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ipamclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested iPAMClaims. -func (c *iPAMClaims) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ipamclaims"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a iPAMClaim and creates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *iPAMClaims) Create(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.CreateOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ipamclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAMClaim). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a iPAMClaim and updates it. Returns the server's representation of the iPAMClaim, and an error, if there is any. -func (c *iPAMClaims) Update(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(iPAMClaim.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAMClaim). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *iPAMClaims) UpdateStatus(ctx context.Context, iPAMClaim *v1alpha1.IPAMClaim, opts v1.UpdateOptions) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(iPAMClaim.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(iPAMClaim). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the iPAMClaim and deletes it. Returns an error if one occurs. -func (c *iPAMClaims) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ipamclaims"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *iPAMClaims) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ipamclaims"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched iPAMClaim. -func (c *iPAMClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.IPAMClaim, err error) { - result = &v1alpha1.IPAMClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ipamclaims"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go index d6b8684d89..3545777356 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/typed/ipamclaims/v1alpha1/ipamclaims_client.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - "net/http" + http "net/http" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + scheme "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -85,10 +85,10 @@ func New(c rest.Interface) *K8sV1alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion + gv := ipamclaimsv1alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go index 8ba00a69fc..7efe7e95a6 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -42,6 +42,7 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -80,6 +81,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -184,6 +193,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -218,6 +228,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. 
At that point no new diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go index 94f709e9bb..d5dabd6983 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package externalversions import ( - "fmt" + fmt "fmt" v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go index 8d1429d5f3..cb5a445987 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 
in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go index c93d99e4be..b2cad1c067 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go index 1ab51a9ed7..455310ee4d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go index fd46dc78b7..8caa586ce5 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,13 +19,13 @@ limitations under the License. package v1alpha1 import ( - "context" + context "context" time "time" - ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + crdipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" versioned "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned" internalinterfaces "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -36,7 +36,7 @@ import ( // IPAMClaims. 
type IPAMClaimInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.IPAMClaimLister + Lister() ipamclaimsv1alpha1.IPAMClaimLister } type iPAMClaimInformer struct { @@ -71,7 +71,7 @@ func NewFilteredIPAMClaimInformer(client versioned.Interface, namespace string, return client.K8sV1alpha1().IPAMClaims(namespace).Watch(context.TODO(), options) }, }, - &ipamclaimsv1alpha1.IPAMClaim{}, + &crdipamclaimsv1alpha1.IPAMClaim{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *iPAMClaimInformer) defaultInformer(client versioned.Interface, resyncPe } func (f *iPAMClaimInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ipamclaimsv1alpha1.IPAMClaim{}, f.defaultInformer) + return f.factory.InformerFor(&crdipamclaimsv1alpha1.IPAMClaim{}, f.defaultInformer) } -func (f *iPAMClaimInformer) Lister() v1alpha1.IPAMClaimLister { - return v1alpha1.NewIPAMClaimLister(f.Informer().GetIndexer()) +func (f *iPAMClaimInformer) Lister() ipamclaimsv1alpha1.IPAMClaimLister { + return ipamclaimsv1alpha1.NewIPAMClaimLister(f.Informer().GetIndexer()) } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go index 086ab4ab65..bb37e41381 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go index 409fc70d06..474e11b48e 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers/ipamclaims/v1alpha1/ipamclaim.go @@ -1,5 +1,5 @@ /* -Copyright 2024 The Kubernetes Authors +Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,10 +19,10 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // IPAMClaimLister helps list IPAMClaims. @@ -30,7 +30,7 @@ import ( type IPAMClaimLister interface { // List lists all IPAMClaims in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) + List(selector labels.Selector) (ret []*ipamclaimsv1alpha1.IPAMClaim, err error) // IPAMClaims returns an object that can list and get IPAMClaims. IPAMClaims(namespace string) IPAMClaimNamespaceLister IPAMClaimListerExpansion @@ -38,25 +38,17 @@ type IPAMClaimLister interface { // iPAMClaimLister implements the IPAMClaimLister interface. 
type iPAMClaimLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ipamclaimsv1alpha1.IPAMClaim] } // NewIPAMClaimLister returns a new IPAMClaimLister. func NewIPAMClaimLister(indexer cache.Indexer) IPAMClaimLister { - return &iPAMClaimLister{indexer: indexer} -} - -// List lists all IPAMClaims in the indexer. -func (s *iPAMClaimLister) List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAMClaim)) - }) - return ret, err + return &iPAMClaimLister{listers.New[*ipamclaimsv1alpha1.IPAMClaim](indexer, ipamclaimsv1alpha1.Resource("ipamclaim"))} } // IPAMClaims returns an object that can list and get IPAMClaims. func (s *iPAMClaimLister) IPAMClaims(namespace string) IPAMClaimNamespaceLister { - return iPAMClaimNamespaceLister{indexer: s.indexer, namespace: namespace} + return iPAMClaimNamespaceLister{listers.NewNamespaced[*ipamclaimsv1alpha1.IPAMClaim](s.ResourceIndexer, namespace)} } // IPAMClaimNamespaceLister helps list and get IPAMClaims. @@ -64,36 +56,15 @@ func (s *iPAMClaimLister) IPAMClaims(namespace string) IPAMClaimNamespaceLister type IPAMClaimNamespaceLister interface { // List lists all IPAMClaims in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) + List(selector labels.Selector) (ret []*ipamclaimsv1alpha1.IPAMClaim, err error) // Get retrieves the IPAMClaim from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.IPAMClaim, error) + Get(name string) (*ipamclaimsv1alpha1.IPAMClaim, error) IPAMClaimNamespaceListerExpansion } // iPAMClaimNamespaceLister implements the IPAMClaimNamespaceLister // interface. 
type iPAMClaimNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all IPAMClaims in the indexer for a given namespace. -func (s iPAMClaimNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.IPAMClaim, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.IPAMClaim)) - }) - return ret, err -} - -// Get retrieves the IPAMClaim from the indexer for a given namespace and name. -func (s iPAMClaimNamespaceLister) Get(name string) (*v1alpha1.IPAMClaim, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("ipamclaim"), name) - } - return obj.(*v1alpha1.IPAMClaim), nil + listers.ResourceIndexer[*ipamclaimsv1alpha1.IPAMClaim] } diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go index ca94219215..bb4fc0e97d 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/types.go @@ -4,13 +4,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0 paths=./... object crd output:artifacts:code=./,config=../../../../artifacts +//go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.5 paths=./... 
object crd output:artifacts:code=./,config=../../../../artifacts -//go:generate go run k8s.io/code-generator/cmd/client-gen@v0.28.0 client-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --clientset-name versioned --input-base "" --input github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset .. +//go:generate go run k8s.io/code-generator/cmd/client-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --clientset-name versioned --input-base "" --input github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset --output-dir ./apis/clientset .. -//go:generate go run k8s.io/code-generator/cmd/lister-gen@v0.28.0 lister-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --input-dirs github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers .. +//go:generate go run k8s.io/code-generator/cmd/lister-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-dir ./apis/listers ./ -//go:generate go run k8s.io/code-generator/cmd/informer-gen@v0.28.0 informer-gen --go-header-file ../../../../hack/custom-boilerplate.go.txt --input-dirs github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 --versioned-clientset-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned --listers-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers .. 
+//go:generate go run k8s.io/code-generator/cmd/informer-gen@v0.32.5 --go-header-file ../../../../hack/custom-boilerplate.go.txt --versioned-clientset-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned --listers-package github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/listers --output-pkg github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/informers --output-dir ./apis/informers ./ // +genclient // +kubebuilder:object:root=true @@ -35,9 +35,14 @@ type IPAMClaimSpec struct { Interface string `json:"interface"` } +// IPAMClaimStatus contains the observed status of the IPAMClaim. type IPAMClaimStatus struct { // The list of IP addresses (v4, v6) that were allocated for the pod interface IPs []string `json:"ips"` + // The name of the pod holding the IPAMClaim + OwnerPod OwnerPod `json:"ownerPod,omitempty"` + // Conditions contains details for one aspect of the current state of this API Resource + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -47,3 +52,7 @@ type IPAMClaimList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []IPAMClaim `json:"items"` } + +type OwnerPod struct { + Name string `json:"name"` +} diff --git a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go index 737efd7a84..d68e38c3ee 100644 --- a/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go +++ b/go-controller/vendor/github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/zz_generated.deepcopy.go @@ -5,6 +5,7 @@ package v1alpha1 import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -90,6 +91,14 @@ func 
(in *IPAMClaimStatus) DeepCopyInto(out *IPAMClaimStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + out.OwnerPod = in.OwnerPod + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMClaimStatus. @@ -101,3 +110,18 @@ func (in *IPAMClaimStatus) DeepCopy() *IPAMClaimStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OwnerPod) DeepCopyInto(out *OwnerPod) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerPod. +func (in *OwnerPod) DeepCopy() *OwnerPod { + if in == nil { + return nil + } + out := new(OwnerPod) + in.DeepCopyInto(out) + return out +} diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/LICENSE b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/LICENSE rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/LICENSE diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/NOTICE b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/NOTICE rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/NOTICE diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/cache.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/cache/cache.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go index 0b1e09e721..ffe871fd3e 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/cache.go +++ 
b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/cache.go @@ -15,10 +15,10 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/stdr" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/updates" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/updates" ) const ( diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/cache/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/cache/uuidset.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/cache/uuidset.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/api.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go index 4977589442..f6a8d6fb34 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api.go @@ -7,9 +7,9 @@ import ( "reflect" "github.com/go-logr/logr" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // API defines basic 
operations to interact with the database diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go index 36ea476e08..7a97b6d08c 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/api_test_model.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/api_test_model.go @@ -4,9 +4,9 @@ import ( "encoding/json" "testing" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" "github.com/stretchr/testify/assert" ) diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/client.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/client.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go index 10ea757ec7..3926ad6ddf 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/client.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/client.go @@ -20,11 +20,11 @@ import ( "github.com/cenkalti/rpc2/jsonrpc" "github.com/go-logr/logr" "github.com/go-logr/stdr" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/ovsdb/serverdb" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + 
"github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb" ) // Constants defined for libovsdb diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/condition.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go similarity index 97% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/condition.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go index 1dfabda02e..1269339cea 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/condition.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/condition.go @@ -4,10 +4,10 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // Conditional is the interface used by the ConditionalAPI to match on cache objects diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/config.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/config.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/config.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/metrics.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go similarity index 100% rename from 
go-controller/vendor/github.com/ovn-org/libovsdb/client/metrics.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/metrics.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/monitor.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go similarity index 97% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/monitor.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go index 4a0270a87a..767a4cf3d6 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/client/monitor.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/monitor.go @@ -5,8 +5,8 @@ import ( "reflect" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) const emptyUUID = "00000000-0000-0000-0000-000000000000" diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/client/options.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/client/options.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/client/options.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/database.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go similarity index 93% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/database.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go index 12f1222f19..9bdb69568b 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/database.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/database.go @@ -2,8 +2,8 @@ package database import ( "github.com/google/uuid" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + 
"github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // Database abstracts a database that a server can use to store and transact data diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/inmemory.go similarity index 93% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/inmemory.go index 6c1dce9e79..763dcd7fd0 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/inmemory/inmemory.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/inmemory/inmemory.go @@ -9,11 +9,11 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/cache" - dbase "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/database/transaction" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/cache" + dbase "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/database/transaction" + "github.com/ovn-kubernetes/libovsdb/model" + 
"github.com/ovn-kubernetes/libovsdb/ovsdb" ) type inMemoryDatabase struct { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/references.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/references.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/references.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/errors.go similarity index 91% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/errors.go index 35e47c7294..204a7f544a 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/errors.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/errors.go @@ -3,7 +3,7 @@ package transaction import ( "fmt" - "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/cache" ) func newIndexExistsDetails(err cache.ErrIndexExists) string { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/transaction.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go rename to 
go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/transaction.go index 69736d0048..77b8e920c0 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/database/transaction/transaction.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/database/transaction/transaction.go @@ -7,11 +7,11 @@ import ( "github.com/go-logr/logr" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/cache" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" - "github.com/ovn-org/libovsdb/updates" + "github.com/ovn-kubernetes/libovsdb/cache" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/updates" ) type Transaction struct { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/info.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/mapper/info.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go index 8ac436c790..0e24ef25ec 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/info.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/info.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // ErrColumnNotFound is an error that can occur when the column does not exist for a table diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go index 5ca7a412bb..24ce7b3b8c 100644 --- 
a/go-controller/vendor/github.com/ovn-org/libovsdb/mapper/mapper.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/mapper/mapper.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // Mapper offers functions to interact with libovsdb through user-provided native structs. diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/model/client.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/model/client.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go index 5eb686244a..e8a39260e9 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/model/client.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/client.go @@ -4,8 +4,8 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // ColumnKey addresses a column and optionally a key within a column diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/model/database.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go similarity index 97% rename from go-controller/vendor/github.com/ovn-org/libovsdb/model/database.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go index 0857d903f3..30ccff67b1 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/model/database.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/database.go @@ -4,8 +4,8 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // A DatabaseModel represents libovsdb's metadata about the database. 
diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/model/model.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/model/model.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go index c8575f5bf3..249db69921 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/model/model.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/model/model.go @@ -5,7 +5,7 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // A Model is the base interface used to build Database Models. It is used diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/bindings.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/bindings.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/condition.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/condition.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/error.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/error.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/map.go rename to 
go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/map.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/monitor_select.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/monitor_select.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/mutation.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/mutation.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/named_uuid.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/named_uuid.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/notation.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/notation.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/row.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/row.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go similarity index 100% rename from 
go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/rpc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/rpc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/schema.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/schema.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/.gitignore rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/.gitignore diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go index 274a7164fe..a93ca0d86f 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/database.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/database.go @@ -3,7 +3,7 @@ package serverdb -import "github.com/ovn-org/libovsdb/model" +import "github.com/ovn-kubernetes/libovsdb/model" const DatabaseTable = "Database" diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/gen.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/gen.go diff --git 
a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go similarity index 95% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go index 3c117faa26..c0aeeb74c3 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/serverdb/model.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb/model.go @@ -6,8 +6,8 @@ package serverdb import ( "encoding/json" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // FullDatabaseModel returns the DatabaseModel object to be used in libovsdb diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/set.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/set.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/update3.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/update3.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go 
b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/updates2.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/updates2.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/ovsdb/uuid.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/ovsdb/uuid.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/server/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/server/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/server/monitor.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/monitor.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/server/monitor.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/monitor.go index 2dedf992b0..305769a212 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/server/monitor.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/monitor.go @@ -7,8 +7,8 @@ import ( "github.com/cenkalti/rpc2" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // connectionMonitors maps a connection to a map or monitors diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/server/server.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/server.go similarity index 98% rename from 
go-controller/vendor/github.com/ovn-org/libovsdb/server/server.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/server.go index ec60ea5d20..830560fc36 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/server/server.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/server/server.go @@ -14,9 +14,9 @@ import ( "github.com/go-logr/logr" "github.com/go-logr/stdr" "github.com/google/uuid" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // OvsdbServer is an ovsdb server diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/difference.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/difference.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/difference.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/doc.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go similarity index 100% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/doc.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/doc.go diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/merge.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go similarity index 98% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/merge.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go index 562f226232..82d78239f6 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/merge.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/merge.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - 
"github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) func merge(ts *ovsdb.TableSchema, a, b modelUpdate) (modelUpdate, error) { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/mutate.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/mutate.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go index 1d87737fcd..b91ef85341 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/mutate.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/mutate.go @@ -3,7 +3,7 @@ package updates import ( "reflect" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) func removeFromSlice(a, b reflect.Value) (reflect.Value, bool) { diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/references.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go similarity index 99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/references.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go index 938d02aae9..4d998e0511 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/references.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/references.go @@ -3,9 +3,9 @@ package updates import ( "fmt" - "github.com/ovn-org/libovsdb/database" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/database" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) // ReferenceProvider should be implemented by a database that tracks references diff --git a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/updates.go b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go similarity index 
99% rename from go-controller/vendor/github.com/ovn-org/libovsdb/updates/updates.go rename to go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go index 4ff2363a05..00fbcccffa 100644 --- a/go-controller/vendor/github.com/ovn-org/libovsdb/updates/updates.go +++ b/go-controller/vendor/github.com/ovn-kubernetes/libovsdb/updates/updates.go @@ -4,9 +4,9 @@ import ( "fmt" "reflect" - "github.com/ovn-org/libovsdb/mapper" - "github.com/ovn-org/libovsdb/model" - "github.com/ovn-org/libovsdb/ovsdb" + "github.com/ovn-kubernetes/libovsdb/mapper" + "github.com/ovn-kubernetes/libovsdb/model" + "github.com/ovn-kubernetes/libovsdb/ovsdb" ) type rowUpdate2 = ovsdb.RowUpdate2 diff --git a/go-controller/vendor/golang.org/x/net/context/context.go b/go-controller/vendor/golang.org/x/net/context/context.go index cf66309c4a..db1c95fab1 100644 --- a/go-controller/vendor/golang.org/x/net/context/context.go +++ b/go-controller/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. 
The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
+type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. 
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. 
To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/go-controller/vendor/golang.org/x/net/context/go17.go b/go-controller/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b867937..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/go-controller/vendor/golang.org/x/net/context/go19.go b/go-controller/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a904..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/go-controller/vendor/golang.org/x/net/context/pre_go17.go b/go-controller/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa5..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. 
-var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/go-controller/vendor/golang.org/x/net/context/pre_go19.go b/go-controller/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a638033..0000000000 --- a/go-controller/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. 
-// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. 
- // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/go-controller/vendor/golang.org/x/net/html/atom/table.go b/go-controller/vendor/golang.org/x/net/html/atom/table.go index 2a938864cb..b460e6f722 100644 --- a/go-controller/vendor/golang.org/x/net/html/atom/table.go +++ b/go-controller/vendor/golang.org/x/net/html/atom/table.go @@ -11,23 +11,23 @@ const ( AcceptCharset Atom = 0x1a0e Accesskey Atom = 0x2c09 Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 + Action Atom = 0x26506 + Address Atom = 0x6f107 Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f + Allowfullscreen Atom = 0x3280f Allowpaymentrequest Atom = 0xc113 Allowusermedia Atom = 0xdd0e Alt Atom = 0xf303 Annotation Atom = 0x1c90a AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 + Applet Atom = 0x30806 + Area Atom = 0x35004 + Article Atom = 0x3f607 As Atom = 0x3c02 Aside Atom = 0x10705 Async Atom = 0xff05 Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c + Autocomplete Atom = 0x26b0c Autofocus Atom = 0x12109 Autoplay Atom = 0x13c08 B Atom = 0x101 @@ -43,34 +43,34 @@ const ( Br Atom = 0x202 Button Atom = 0x19106 Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 + Caption Atom = 0x22407 + Center Atom = 0x21306 + Challenge Atom = 0x28e09 Charset Atom = 0x2107 - Checked Atom = 0x47907 + Checked Atom = 0x5b507 Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 + Class Atom = 0x55805 + Code Atom = 0x5ee04 Col Atom = 0x1ab03 Colgroup Atom = 0x1ab08 Color Atom = 0x1bf05 Cols Atom = 0x1c404 Colspan Atom = 0x1c407 Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b + Content Atom = 0x57b07 + Contenteditable Atom = 0x57b0f + Contextmenu Atom = 0x37a0b Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 + Coords Atom = 
0x1f006 + Crossorigin Atom = 0x1fa0b + Data Atom = 0x49904 + Datalist Atom = 0x49908 + Datetime Atom = 0x2ab08 + Dd Atom = 0x2bf02 Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 + Defer Atom = 0x5f005 + Del Atom = 0x44c03 + Desc Atom = 0x55504 Details Atom = 0x7207 Dfn Atom = 0x8703 Dialog Atom = 0xbb06 @@ -78,106 +78,106 @@ const ( Dirname Atom = 0x9307 Disabled Atom = 0x16408 Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 + Dl Atom = 0x5d602 + Download Atom = 0x45d08 Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 + Dropzone Atom = 0x3ff08 + Dt Atom = 0x64002 Em Atom = 0x6e02 Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 + Enctype Atom = 0x28007 + Face Atom = 0x21104 + Fieldset Atom = 0x21908 + Figcaption Atom = 0x2210a + Figure Atom = 0x23b06 Font Atom = 0x3f04 Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a + For Atom = 0x24703 + ForeignObject Atom = 0x2470d + Foreignobject Atom = 0x2540d + Form Atom = 0x26104 + Formaction Atom = 0x2610a + Formenctype Atom = 0x27c0b + Formmethod Atom = 0x2970a + Formnovalidate Atom = 0x2a10e + Formtarget Atom = 0x2b30a Frame Atom = 0x8b05 Frameset Atom = 0x8b08 H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 + H2 Atom = 0x56102 + H3 Atom = 0x2cd02 + H4 Atom = 0x2fc02 + H5 Atom = 0x33f02 + H6 Atom = 0x34902 + Head Atom = 0x32004 + Header Atom = 0x32006 + Headers Atom = 0x32007 Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 + Hgroup Atom = 0x64206 + 
Hidden Atom = 0x2bd06 + High Atom = 0x2ca04 Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 + Href Atom = 0x2cf04 + Hreflang Atom = 0x2cf08 Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a + HttpEquiv Atom = 0x2d70a I Atom = 0x601 - Icon Atom = 0x58a04 + Icon Atom = 0x57a04 Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 + Iframe Atom = 0x2eb06 + Image Atom = 0x2f105 + Img Atom = 0x2f603 + Input Atom = 0x44505 + Inputmode Atom = 0x44509 + Ins Atom = 0x20303 + Integrity Atom = 0x23209 Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 + Isindex Atom = 0x2fe07 + Ismap Atom = 0x30505 + Itemid Atom = 0x38506 Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 + Itemref Atom = 0x3c707 + Itemscope Atom = 0x66f09 + Itemtype Atom = 0x30e08 Kbd Atom = 0xb903 Keygen Atom = 0x3206 Keytype Atom = 0xd607 Kind Atom = 0x17704 Label Atom = 0x5905 - Lang Atom = 0x2e404 + Lang Atom = 0x2d304 Legend Atom = 0x18106 Li Atom = 0xb202 Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 + List Atom = 0x49d04 + Listing Atom = 0x49d07 Loop Atom = 0x5d04 Low Atom = 0xc303 Main Atom = 0x1004 Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 + Manifest Atom = 0x6d508 + Map Atom = 0x30703 Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 + Marquee Atom = 0x31607 + Math Atom = 0x31d04 + Max Atom = 0x33703 + Maxlength Atom = 0x33709 Media Atom = 0xe605 Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 + Menu Atom = 0x38104 + Menuitem Atom = 0x38108 + Meta Atom = 0x4ac04 Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 
0x2b102 + Method Atom = 0x29b06 + Mglyph Atom = 0x2f706 + Mi Atom = 0x34102 + Min Atom = 0x34103 + Minlength Atom = 0x34109 + Mn Atom = 0x2a402 Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 + Ms Atom = 0x67202 + Mtext Atom = 0x34b05 + Multiple Atom = 0x35908 + Muted Atom = 0x36105 Name Atom = 0x9604 Nav Atom = 0x1303 Nobr Atom = 0x3704 @@ -185,101 +185,101 @@ const ( Noframes Atom = 0x8908 Nomodule Atom = 0xa208 Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 + Noscript Atom = 0x2c208 + Novalidate Atom = 0x2a50a + Object Atom = 0x25b06 Ol Atom = 0x13702 Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 + Onafterprint Atom = 0x2290c + Onautocomplete Atom = 0x2690e + Onautocompleteerror Atom = 0x26913 + Onauxclick Atom = 0x6140a + Onbeforeprint Atom = 0x69c0d + Onbeforeunload Atom = 0x6e50e + Onblur Atom = 0x1ea06 Oncancel Atom = 0x11908 Oncanplay Atom = 0x14d09 Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - 
Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 + Onchange Atom = 0x41508 + Onclick Atom = 0x2e407 + Onclose Atom = 0x36607 + Oncontextmenu Atom = 0x3780d + Oncopy Atom = 0x38b06 + Oncuechange Atom = 0x3910b + Oncut Atom = 0x39c05 + Ondblclick Atom = 0x3a10a + Ondrag Atom = 0x3ab06 + Ondragend Atom = 0x3ab09 + Ondragenter Atom = 0x3b40b + Ondragexit Atom = 0x3bf0a + Ondragleave Atom = 0x3d90b + Ondragover Atom = 0x3e40a + Ondragstart Atom = 0x3ee0b + Ondrop Atom = 0x3fd06 + Ondurationchange Atom = 0x40d10 + Onemptied Atom = 0x40409 + Onended Atom = 0x41d07 + Onerror Atom = 0x42407 + Onfocus Atom = 0x42b07 + Onhashchange Atom = 0x4370c + Oninput Atom = 0x44307 + Oninvalid Atom = 0x44f09 + Onkeydown Atom = 0x45809 + Onkeypress Atom = 0x4650a + Onkeyup Atom = 0x47407 + Onlanguagechange Atom = 0x48110 + Onload Atom = 0x49106 + Onloadeddata Atom = 0x4910c + Onloadedmetadata Atom = 0x4a410 + Onloadend Atom = 0x4ba09 + Onloadstart Atom = 0x4c30b 
+ Onmessage Atom = 0x4ce09 + Onmessageerror Atom = 0x4ce0e + Onmousedown Atom = 0x4dc0b + Onmouseenter Atom = 0x4e70c + Onmouseleave Atom = 0x4f30c + Onmousemove Atom = 0x4ff0b + Onmouseout Atom = 0x50a0a + Onmouseover Atom = 0x5170b + Onmouseup Atom = 0x52209 + Onmousewheel Atom = 0x5300c + Onoffline Atom = 0x53c09 + Ononline Atom = 0x54508 + Onpagehide Atom = 0x54d0a + Onpageshow Atom = 0x5630a + Onpaste Atom = 0x56f07 + Onpause Atom = 0x58a07 + Onplay Atom = 0x59406 + Onplaying Atom = 0x59409 + Onpopstate Atom = 0x59d0a + Onprogress Atom = 0x5a70a + Onratechange Atom = 0x5bc0c + Onrejectionhandled Atom = 0x5c812 + Onreset Atom = 0x5da07 + Onresize Atom = 0x5e108 + Onscroll Atom = 0x5f508 + Onsecuritypolicyviolation Atom = 0x5fd19 + Onseeked Atom = 0x61e08 + Onseeking Atom = 0x62609 + Onselect Atom = 0x62f08 + Onshow Atom = 0x63906 + Onsort Atom = 0x64d06 + Onstalled Atom = 0x65709 + Onstorage Atom = 0x66009 + Onsubmit Atom = 0x66908 + Onsuspend Atom = 0x67909 Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 + Ontoggle Atom = 0x68208 + Onunhandledrejection Atom = 0x68a14 + Onunload Atom = 0x6a908 + Onvolumechange Atom = 0x6b10e + Onwaiting Atom = 0x6bf09 + Onwheel Atom = 0x6c807 Open Atom = 0x1a304 Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 + Optimum Atom = 0x6cf07 + Option Atom = 0x6e106 + Output Atom = 0x51106 P Atom = 0xc01 Param Atom = 0xc05 Pattern Atom = 0x6607 @@ -288,466 +288,468 @@ const ( Placeholder Atom = 0x1310b Plaintext Atom = 0x1b209 Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 + Poster Atom = 0x64706 + Pre Atom = 0x46a03 + Preload Atom = 0x47a07 + Progress Atom = 0x5a908 + Prompt Atom = 0x52a06 + Public Atom = 0x57606 Q 
Atom = 0xcf01 Radiogroup Atom = 0x30a Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 + Readonly Atom = 0x35108 + Referrerpolicy Atom = 0x3cb0e + Rel Atom = 0x47b03 + Required Atom = 0x23f08 Reversed Atom = 0x8008 Rows Atom = 0x9c04 Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 + Rp Atom = 0x22f02 Rt Atom = 0x19a02 Rtc Atom = 0x19a03 Ruby Atom = 0xfb04 S Atom = 0x2501 Samp Atom = 0x7804 Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 + Scope Atom = 0x67305 + Scoped Atom = 0x67306 + Script Atom = 0x2c406 + Seamless Atom = 0x36b08 + Search Atom = 0x55c06 + Section Atom = 0x1e507 + Select Atom = 0x63106 + Selected Atom = 0x63108 + Shape Atom = 0x1f505 + Size Atom = 0x5e504 + Sizes Atom = 0x5e505 + Slot Atom = 0x20504 + Small Atom = 0x32605 + Sortable Atom = 0x64f08 + Sorted Atom = 0x37206 + Source Atom = 0x43106 + Spacer Atom = 0x46e06 Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 + Spellcheck Atom = 0x5b00a + Src Atom = 0x5e903 + Srcdoc Atom = 0x5e906 + Srclang Atom = 0x6f707 + Srcset Atom = 0x6fe06 + Start Atom = 0x3f405 + Step Atom = 0x57304 Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 + Strong Atom = 0x6db06 + Style Atom = 0x70405 + Sub Atom = 0x66b03 + Summary Atom = 0x70907 + Sup Atom = 0x71003 + Svg 
Atom = 0x71303 + System Atom = 0x71606 + Tabindex Atom = 0x4b208 + Table Atom = 0x58505 + Target Atom = 0x2b706 Tbody Atom = 0x2705 Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 + Template Atom = 0x71908 + Textarea Atom = 0x34c08 Tfoot Atom = 0xf505 Th Atom = 0x15602 - Thead Atom = 0x33005 + Thead Atom = 0x31f05 Time Atom = 0x4204 Title Atom = 0x11005 Tr Atom = 0xcc02 Track Atom = 0x1ba05 - Translate Atom = 0x1f209 + Translate Atom = 0x20809 Tt Atom = 0x6802 Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d + Typemustmatch Atom = 0x2830d U Atom = 0xb01 Ul Atom = 0xa702 Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 + Usemap Atom = 0x58e06 Value Atom = 0x1505 Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 + Video Atom = 0x2e005 + Wbr Atom = 0x56c03 + Width Atom = 0x63e05 + Workertype Atom = 0x7210a + Wrap Atom = 0x72b04 Xmp Atom = 0x12f03 ) -const hash0 = 0x81cdf10e +const hash0 = 0x84f70e16 const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // 
onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, 
// default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 
0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 
0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // 
workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command + 0x1: 0x3ff08, // dropzone + 0x2: 0x3b08, // basefont + 0x3: 0x23209, // integrity + 0x4: 0x43106, // source + 0x5: 0x2c09, // accesskey + 0x6: 0x1a06, // accept + 0x7: 0x6c807, // onwheel + 0xb: 0x47407, // onkeyup + 0xc: 0x32007, // headers + 0xd: 0x67306, // scoped + 0xe: 0x67909, // onsuspend + 0xf: 0x8908, // noframes + 0x10: 0x1fa0b, // crossorigin + 0x11: 0x2e407, // onclick + 0x12: 0x3f405, // start + 0x13: 0x37a0b, // contextmenu + 0x14: 0x5e903, // src + 0x15: 0x1c404, // cols + 0x16: 0xbb06, // dialog + 0x17: 0x47a07, // preload + 0x18: 0x3c707, // itemref + 0x1b: 
0x2f105, // image + 0x1d: 0x4ba09, // onloadend + 0x1e: 0x45d08, // download + 0x1f: 0x46a03, // pre + 0x23: 0x2970a, // formmethod + 0x24: 0x71303, // svg + 0x25: 0xcf01, // q + 0x26: 0x64002, // dt + 0x27: 0x1de08, // controls + 0x2a: 0x2804, // body + 0x2b: 0xd206, // strike + 0x2c: 0x3910b, // oncuechange + 0x2d: 0x4c30b, // onloadstart + 0x2e: 0x2fe07, // isindex + 0x2f: 0xb202, // li + 0x30: 0x1400b, // playsinline + 0x31: 0x34102, // mi + 0x32: 0x30806, // applet + 0x33: 0x4ce09, // onmessage + 0x35: 0x13702, // ol + 0x36: 0x1a304, // open + 0x39: 0x14d09, // oncanplay + 0x3a: 0x6bf09, // onwaiting + 0x3b: 0x11908, // oncancel + 0x3c: 0x6a908, // onunload + 0x3e: 0x53c09, // onoffline + 0x3f: 0x1a0e, // accept-charset + 0x40: 0x32004, // head + 0x42: 0x3ab09, // ondragend + 0x43: 0x1310b, // placeholder + 0x44: 0x2b30a, // formtarget + 0x45: 0x2540d, // foreignobject + 0x47: 0x400c, // ontimeupdate + 0x48: 0xdd0e, // allowusermedia + 0x4a: 0x69c0d, // onbeforeprint + 0x4b: 0x5604, // html + 0x4c: 0x9f04, // span + 0x4d: 0x64206, // hgroup + 0x4e: 0x16408, // disabled + 0x4f: 0x4204, // time + 0x51: 0x42b07, // onfocus + 0x53: 0xb00a, // malignmark + 0x55: 0x4650a, // onkeypress + 0x56: 0x55805, // class + 0x57: 0x1ab08, // colgroup + 0x58: 0x33709, // maxlength + 0x59: 0x5a908, // progress + 0x5b: 0x70405, // style + 0x5c: 0x2a10e, // formnovalidate + 0x5e: 0x38b06, // oncopy + 0x60: 0x26104, // form + 0x61: 0xf606, // footer + 0x64: 0x30a, // radiogroup + 0x66: 0xfb04, // ruby + 0x67: 0x4ff0b, // onmousemove + 0x68: 0x19d08, // itemprop + 0x69: 0x2d70a, // http-equiv + 0x6a: 0x15602, // th + 0x6c: 0x6e02, // em + 0x6d: 0x38108, // menuitem + 0x6e: 0x63106, // select + 0x6f: 0x48110, // onlanguagechange + 0x70: 0x31f05, // thead + 0x71: 0x15c02, // h1 + 0x72: 0x5e906, // srcdoc + 0x75: 0x9604, // name + 0x76: 0x19106, // button + 0x77: 0x55504, // desc + 0x78: 0x17704, // kind + 0x79: 0x1bf05, // color + 0x7c: 0x58e06, // usemap + 0x7d: 0x30e08, // itemtype 
+ 0x7f: 0x6d508, // manifest + 0x81: 0x5300c, // onmousewheel + 0x82: 0x4dc0b, // onmousedown + 0x84: 0xc05, // param + 0x85: 0x2e005, // video + 0x86: 0x4910c, // onloadeddata + 0x87: 0x6f107, // address + 0x8c: 0xef04, // ping + 0x8d: 0x24703, // for + 0x8f: 0x62f08, // onselect + 0x90: 0x30703, // map + 0x92: 0xc01, // p + 0x93: 0x8008, // reversed + 0x94: 0x54d0a, // onpagehide + 0x95: 0x3206, // keygen + 0x96: 0x34109, // minlength + 0x97: 0x3e40a, // ondragover + 0x98: 0x42407, // onerror + 0x9a: 0x2107, // charset + 0x9b: 0x29b06, // method + 0x9c: 0x101, // b + 0x9d: 0x68208, // ontoggle + 0x9e: 0x2bd06, // hidden + 0xa0: 0x3f607, // article + 0xa2: 0x63906, // onshow + 0xa3: 0x64d06, // onsort + 0xa5: 0x57b0f, // contenteditable + 0xa6: 0x66908, // onsubmit + 0xa8: 0x44f09, // oninvalid + 0xaa: 0x202, // br + 0xab: 0x10902, // id + 0xac: 0x5d04, // loop + 0xad: 0x5630a, // onpageshow + 0xb0: 0x2cf04, // href + 0xb2: 0x2210a, // figcaption + 0xb3: 0x2690e, // onautocomplete + 0xb4: 0x49106, // onload + 0xb6: 0x9c04, // rows + 0xb7: 0x1a605, // nonce + 0xb8: 0x68a14, // onunhandledrejection + 0xbb: 0x21306, // center + 0xbc: 0x59406, // onplay + 0xbd: 0x33f02, // h5 + 0xbe: 0x49d07, // listing + 0xbf: 0x57606, // public + 0xc2: 0x23b06, // figure + 0xc3: 0x57a04, // icon + 0xc4: 0x1ab03, // col + 0xc5: 0x47b03, // rel + 0xc6: 0xe605, // media + 0xc7: 0x12109, // autofocus + 0xc8: 0x19a02, // rt + 0xca: 0x2d304, // lang + 0xcc: 0x49908, // datalist + 0xce: 0x2eb06, // iframe + 0xcf: 0x36105, // muted + 0xd0: 0x6140a, // onauxclick + 0xd2: 0x3c02, // as + 0xd6: 0x3fd06, // ondrop + 0xd7: 0x1c90a, // annotation + 0xd8: 0x21908, // fieldset + 0xdb: 0x2cf08, // hreflang + 0xdc: 0x4e70c, // onmouseenter + 0xdd: 0x2a402, // mn + 0xde: 0xe60a, // mediagroup + 0xdf: 0x9805, // meter + 0xe0: 0x56c03, // wbr + 0xe2: 0x63e05, // width + 0xe3: 0x2290c, // onafterprint + 0xe4: 0x30505, // ismap + 0xe5: 0x1505, // value + 0xe7: 0x1303, // nav + 0xe8: 0x54508, // ononline + 
0xe9: 0xb604, // mark + 0xea: 0xc303, // low + 0xeb: 0x3ee0b, // ondragstart + 0xef: 0x12f03, // xmp + 0xf0: 0x22407, // caption + 0xf1: 0xd904, // type + 0xf2: 0x70907, // summary + 0xf3: 0x6802, // tt + 0xf4: 0x20809, // translate + 0xf5: 0x1870a, // blockquote + 0xf8: 0x15702, // hr + 0xfa: 0x2705, // tbody + 0xfc: 0x7b07, // picture + 0xfd: 0x5206, // height + 0xfe: 0x19c04, // cite + 0xff: 0x2501, // s + 0x101: 0xff05, // async + 0x102: 0x56f07, // onpaste + 0x103: 0x19507, // onabort + 0x104: 0x2b706, // target + 0x105: 0x14b03, // bdo + 0x106: 0x1f006, // coords + 0x107: 0x5e108, // onresize + 0x108: 0x71908, // template + 0x10a: 0x3a02, // rb + 0x10b: 0x2a50a, // novalidate + 0x10c: 0x460e, // updateviacache + 0x10d: 0x71003, // sup + 0x10e: 0x6c07, // noembed + 0x10f: 0x16b03, // div + 0x110: 0x6f707, // srclang + 0x111: 0x17a09, // draggable + 0x112: 0x67305, // scope + 0x113: 0x5905, // label + 0x114: 0x22f02, // rp + 0x115: 0x23f08, // required + 0x116: 0x3780d, // oncontextmenu + 0x117: 0x5e504, // size + 0x118: 0x5b00a, // spellcheck + 0x119: 0x3f04, // font + 0x11a: 0x9c07, // rowspan + 0x11b: 0x10a07, // default + 0x11d: 0x44307, // oninput + 0x11e: 0x38506, // itemid + 0x11f: 0x5ee04, // code + 0x120: 0xaa07, // acronym + 0x121: 0x3b04, // base + 0x125: 0x2470d, // foreignObject + 0x126: 0x2ca04, // high + 0x127: 0x3cb0e, // referrerpolicy + 0x128: 0x33703, // max + 0x129: 0x59d0a, // onpopstate + 0x12a: 0x2fc02, // h4 + 0x12b: 0x4ac04, // meta + 0x12c: 0x17305, // blink + 0x12e: 0x5f508, // onscroll + 0x12f: 0x59409, // onplaying + 0x130: 0xc113, // allowpaymentrequest + 0x131: 0x19a03, // rtc + 0x132: 0x72b04, // wrap + 0x134: 0x8b08, // frameset + 0x135: 0x32605, // small + 0x137: 0x32006, // header + 0x138: 0x40409, // onemptied + 0x139: 0x34902, // h6 + 0x13a: 0x35908, // multiple + 0x13c: 0x52a06, // prompt + 0x13f: 0x28e09, // challenge + 0x141: 0x4370c, // onhashchange + 0x142: 0x57b07, // content + 0x143: 0x1c90e, // annotation-xml + 
0x144: 0x36607, // onclose + 0x145: 0x14d10, // oncanplaythrough + 0x148: 0x5170b, // onmouseover + 0x149: 0x64f08, // sortable + 0x14a: 0xa402, // mo + 0x14b: 0x2cd02, // h3 + 0x14c: 0x2c406, // script + 0x14d: 0x41d07, // onended + 0x14f: 0x64706, // poster + 0x150: 0x7210a, // workertype + 0x153: 0x1f505, // shape + 0x154: 0x4, // abbr + 0x155: 0x1, // a + 0x156: 0x2bf02, // dd + 0x157: 0x71606, // system + 0x158: 0x4ce0e, // onmessageerror + 0x159: 0x36b08, // seamless + 0x15a: 0x2610a, // formaction + 0x15b: 0x6e106, // option + 0x15c: 0x31d04, // math + 0x15d: 0x62609, // onseeking + 0x15e: 0x39c05, // oncut + 0x15f: 0x44c03, // del + 0x160: 0x11005, // title + 0x161: 0x11505, // audio + 0x162: 0x63108, // selected + 0x165: 0x3b40b, // ondragenter + 0x166: 0x46e06, // spacer + 0x167: 0x4a410, // onloadedmetadata + 0x168: 0x44505, // input + 0x16a: 0x58505, // table + 0x16b: 0x41508, // onchange + 0x16e: 0x5f005, // defer + 0x171: 0x50a0a, // onmouseout + 0x172: 0x20504, // slot + 0x175: 0x3704, // nobr + 0x177: 0x1d707, // command + 0x17a: 0x7207, // details + 0x17b: 0x38104, // menu + 0x17c: 0xb903, // kbd + 0x17d: 0x57304, // step + 0x17e: 0x20303, // ins + 0x17f: 0x13c08, // autoplay + 0x182: 0x34103, // min + 0x183: 0x17404, // link + 0x185: 0x40d10, // ondurationchange + 0x186: 0x9202, // td + 0x187: 0x8b05, // frame + 0x18a: 0x2ab08, // datetime + 0x18b: 0x44509, // inputmode + 0x18c: 0x35108, // readonly + 0x18d: 0x21104, // face + 0x18f: 0x5e505, // sizes + 0x191: 0x4b208, // tabindex + 0x192: 0x6db06, // strong + 0x193: 0xba03, // bdi + 0x194: 0x6fe06, // srcset + 0x196: 0x67202, // ms + 0x197: 0x5b507, // checked + 0x198: 0xb105, // align + 0x199: 0x1e507, // section + 0x19b: 0x6e05, // embed + 0x19d: 0x15e07, // bgsound + 0x1a2: 0x49d04, // list + 0x1a3: 0x61e08, // onseeked + 0x1a4: 0x66009, // onstorage + 0x1a5: 0x2f603, // img + 0x1a6: 0xf505, // tfoot + 0x1a9: 0x26913, // onautocompleteerror + 0x1aa: 0x5fd19, // onsecuritypolicyviolation + 
0x1ad: 0x9303, // dir + 0x1ae: 0x9307, // dirname + 0x1b0: 0x5a70a, // onprogress + 0x1b2: 0x65709, // onstalled + 0x1b5: 0x66f09, // itemscope + 0x1b6: 0x49904, // data + 0x1b7: 0x3d90b, // ondragleave + 0x1b8: 0x56102, // h2 + 0x1b9: 0x2f706, // mglyph + 0x1ba: 0x16502, // is + 0x1bb: 0x6e50e, // onbeforeunload + 0x1bc: 0x2830d, // typemustmatch + 0x1bd: 0x3ab06, // ondrag + 0x1be: 0x5da07, // onreset + 0x1c0: 0x51106, // output + 0x1c1: 0x12907, // sandbox + 0x1c2: 0x1b209, // plaintext + 0x1c4: 0x34c08, // textarea + 0x1c7: 0xd607, // keytype + 0x1c8: 0x34b05, // mtext + 0x1c9: 0x6b10e, // onvolumechange + 0x1ca: 0x1ea06, // onblur + 0x1cb: 0x58a07, // onpause + 0x1cd: 0x5bc0c, // onratechange + 0x1ce: 0x10705, // aside + 0x1cf: 0x6cf07, // optimum + 0x1d1: 0x45809, // onkeydown + 0x1d2: 0x1c407, // colspan + 0x1d3: 0x1004, // main + 0x1d4: 0x66b03, // sub + 0x1d5: 0x25b06, // object + 0x1d6: 0x55c06, // search + 0x1d7: 0x37206, // sorted + 0x1d8: 0x17003, // big + 0x1d9: 0xb01, // u + 0x1db: 0x26b0c, // autocomplete + 0x1dc: 0xcc02, // tr + 0x1dd: 0xf303, // alt + 0x1df: 0x7804, // samp + 0x1e0: 0x5c812, // onrejectionhandled + 0x1e1: 0x4f30c, // onmouseleave + 0x1e2: 0x28007, // enctype + 0x1e3: 0xa208, // nomodule + 0x1e5: 0x3280f, // allowfullscreen + 0x1e6: 0x5f08, // optgroup + 0x1e8: 0x27c0b, // formenctype + 0x1e9: 0x18106, // legend + 0x1ea: 0x10306, // canvas + 0x1eb: 0x6607, // pattern + 0x1ec: 0x2c208, // noscript + 0x1ed: 0x601, // i + 0x1ee: 0x5d602, // dl + 0x1ef: 0xa702, // ul + 0x1f2: 0x52209, // onmouseup + 0x1f4: 0x1ba05, // track + 0x1f7: 0x3a10a, // ondblclick + 0x1f8: 0x3bf0a, // ondragexit + 0x1fa: 0x8703, // dfn + 0x1fc: 0x26506, // action + 0x1fd: 0x35004, // area + 0x1fe: 0x31607, // marquee + 0x1ff: 0x16d03, // var } const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + @@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" 
"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" + "ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" + + "ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" + + "ignobjectformactionautocompleteerrorformenctypemustmatchalle" + + "ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" + + 
"reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" + + "etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" + + "th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" + + "uitemidoncopyoncuechangeoncutondblclickondragendondragentero" + + "ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" + + "eondropzonemptiedondurationchangeonendedonerroronfocusourceo" + + "nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" + + "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" + + "databindexonloadendonloadstartonmessageerroronmousedownonmou" + + "seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" + + "eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" + + "npageshowbronpastepublicontenteditableonpausemaponplayingonp" + + "opstateonprogresspellcheckedonratechangeonrejectionhandledon" + + "resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" + + "xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" + + "bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" + + "ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" + + "eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" + + "esummarysupsvgsystemplateworkertypewrap" diff --git a/go-controller/vendor/golang.org/x/net/html/doc.go b/go-controller/vendor/golang.org/x/net/html/doc.go index 3a7e5ab176..885c4c5936 100644 --- a/go-controller/vendor/golang.org/x/net/html/doc.go +++ b/go-controller/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... } - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... 
} - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/go-controller/vendor/golang.org/x/net/html/doctype.go b/go-controller/vendor/golang.org/x/net/html/doctype.go index c484e5a94f..bca3ae9a0c 100644 --- a/go-controller/vendor/golang.org/x/net/html/doctype.go +++ b/go-controller/vendor/golang.org/x/net/html/doctype.go @@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { quirks = true } } diff --git a/go-controller/vendor/golang.org/x/net/html/foreign.go b/go-controller/vendor/golang.org/x/net/html/foreign.go index 9da9e9dc42..e8515d8e88 100644 --- a/go-controller/vendor/golang.org/x/net/html/foreign.go +++ b/go-controller/vendor/golang.org/x/net/html/foreign.go @@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { + if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { return true } } diff --git a/go-controller/vendor/golang.org/x/net/html/iter.go b/go-controller/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 0000000000..54be8fd30f --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. 
+// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. +func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. +func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/go-controller/vendor/golang.org/x/net/html/node.go b/go-controller/vendor/golang.org/x/net/html/node.go index 1350eef22c..77741a1950 100644 --- a/go-controller/vendor/golang.org/x/net/html/node.go +++ b/go-controller/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a". - if z.err == nil && z.buf[z.raw.end-2] == '/' { + // Look for a self-closing token (e.g.
). + // + // Originally, we did this by just checking that the last character of the + // tag (ignoring the closing bracket) was a solidus (/) character, but this + // is not always accurate. + // + // We need to be careful that we don't misinterpret a non-self-closing tag + // as self-closing, as can happen if the tag contains unquoted attribute + // values (i.e.

). + // + // To avoid this, we check that the last non-bracket character of the tag + // (z.raw.end-2) isn't the same character as the last non-quote character of + // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has + // attributes. + nAttrs := len(z.attr) + if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) { return SelfClosingTagToken } return StartTagToken diff --git a/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go b/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6c1..e81b73e6a7 100644 --- a/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/go-controller/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -8,8 +8,8 @@ package http2 import ( "context" - "crypto/tls" "errors" + "net" "net/http" "sync" ) @@ -158,7 +158,7 @@ func (c *dialCall) dial(ctx context.Context, addr string) { // This code decides which ones live or die. // The return value used is whether c was used. // c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) { p.mu.Lock() for _, cc := range p.conns[key] { if cc.CanTakeNewRequest() { @@ -194,8 +194,8 @@ type addConnCall struct { err error } -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) +func (c *addConnCall) run(t *Transport, key string, nc net.Conn) { + cc, err := t.NewClientConn(nc) p := c.p p.mu.Lock() diff --git a/go-controller/vendor/golang.org/x/net/http2/config.go b/go-controller/vendor/golang.org/x/net/http2/config.go index de58dfb8dc..ca645d9a1a 100644 --- a/go-controller/vendor/golang.org/x/net/http2/config.go +++ b/go-controller/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/go-controller/vendor/golang.org/x/net/http2/config_go124.go b/go-controller/vendor/golang.org/x/net/http2/config_go124.go index e3784123c8..5b516c55ff 100644 --- a/go-controller/vendor/golang.org/x/net/http2/config_go124.go +++ b/go-controller/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/go-controller/vendor/golang.org/x/net/http2/frame.go b/go-controller/vendor/golang.org/x/net/http2/frame.go index 105c3b279c..97bd8b06f7 100644 --- a/go-controller/vendor/golang.org/x/net/http2/frame.go +++ b/go-controller/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) @@ -1490,7 +1501,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1509,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. 
+ // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/go-controller/vendor/golang.org/x/net/http2/http2.go b/go-controller/vendor/golang.org/x/net/http2/http2.go index 7688c356b7..6c18ea230b 100644 --- a/go-controller/vendor/golang.org/x/net/http2/http2.go +++ b/go-controller/vendor/golang.org/x/net/http2/http2.go @@ -38,6 +38,15 @@ var ( logFrameWrites bool logFrameReads bool inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. + disableExtendedConnectProtocol = true ) func init() { @@ -50,6 +59,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false + } } const ( @@ -141,6 +153,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +166,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: 
"ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -397,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). 
diff --git a/go-controller/vendor/golang.org/x/net/http2/server.go b/go-controller/vendor/golang.org/x/net/http2/server.go index 617b4a4762..51fca38f61 100644 --- a/go-controller/vendor/golang.org/x/net/http2/server.go +++ b/go-controller/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -306,7 +307,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -323,12 +324,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. 
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -793,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -913,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1045,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } @@ -1782,6 +1808,9 @@ func (sc *serverConn) processSetting(s Setting) error { 
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2207,19 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.Protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2233,12 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", 
streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") + } + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2247,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2263,83 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. 
- if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -2880,6 +2874,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. 
+ return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3229,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. 
diff --git a/go-controller/vendor/golang.org/x/net/http2/transport.go b/go-controller/vendor/golang.org/x/net/http2/transport.go index 0c5f64aa8b..f26356b9cd 100644 --- a/go-controller/vendor/golang.org/x/net/http2/transport.go +++ b/go-controller/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() { } } +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() + } + return time.Now() +} + +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) + } + return time.Since(when) +} + // newTimer creates a new time.Timer, or a synthetic timer in tests. 
func (t *Transport) newTimer(d time.Duration) timer { if t.transportTestHooks != nil { @@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -324,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -335,25 +368,27 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our 
conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + closedOnIdle bool // true if conn was closed for idleness + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -363,6 +398,25 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. 
+ rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -420,12 +474,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -530,6 +584,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -562,7 +618,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. 
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. + case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -573,7 +636,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -598,6 +661,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -616,9 +695,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -752,11 +832,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), } var group synctestGroupInterface if t.transportTestHooks != nil { @@ -960,7 +1042,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -992,16 +1074,40 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. 
maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1014,7 +1120,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. 
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1052,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1168,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1196,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. 
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1257,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1376,6 +1432,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1387,8 +1445,11 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true } // Acquire the new-request lock by writing to reqHeaderMu. 
@@ -1397,6 +1458,18 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1535,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. 
- endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -1578,6 +1664,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1602,16 +1689,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). 
+ // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. 
+ return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ @@ -1908,214 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -// requires cc.wmu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. 
(We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if req.Method != "CONNECT" { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). 
- didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. - for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. 
This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2132,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2180,10 +2092,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2243,7 +2155,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2267,6 +2178,27 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. 
+ // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2278,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. + cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2324,7 +2263,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2376,7 +2315,7 @@ func (rl *clientConnReadLoop) run() error { } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. 
So if this @@ -2464,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2472,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2494,15 +2433,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2577,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -2686,7 +2644,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2821,9 +2779,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. 
+ rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2917,6 +2888,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -2934,6 +2920,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. 
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -2942,7 +2929,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -2971,7 +2958,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3046,6 +3033,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3068,20 +3061,27 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). 
cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3228,7 +3228,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() @@ -3265,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/go-controller/vendor/golang.org/x/net/http2/unencrypted.go b/go-controller/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 0000000000..b2de211613 --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. 
+// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/go-controller/vendor/golang.org/x/net/http2/write.go b/go-controller/vendor/golang.org/x/net/http2/write.go index 6ff6bee7e9..fdb35b9477 100644 --- a/go-controller/vendor/golang.org/x/net/http2/write.go +++ b/go-controller/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 0000000000..ed14da5afc --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/go-controller/vendor/golang.org/x/net/http2/headermap.go b/go-controller/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from go-controller/vendor/golang.org/x/net/http2/headermap.go rename to go-controller/vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd20e..92483d8e41 100644 --- a/go-controller/vendor/golang.org/x/net/http2/headermap.go +++ b/go-controller/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. +func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go b/go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 0000000000..4b70553179 --- /dev/null +++ b/go-controller/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. 
+ if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. 
(We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). 
+ didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). 
+ return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
+func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. 
+ InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." 
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go index cebde7634f..3c9576e2d8 100644 --- a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go +++ b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_ppc64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go index cebde7634f..3c9576e2d8 100644 --- a/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go +++ 
b/go-controller/vendor/golang.org/x/net/internal/socket/zsys_openbsd_riscv64.go @@ -4,27 +4,27 @@ package socket type iovec struct { - Base *byte - Len uint64 + Base *byte + Len uint64 } type msghdr struct { - Name *byte - Namelen uint32 - Iov *iovec - Iovlen uint32 - Control *byte - Controllen uint32 - Flags int32 + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 } type cmsghdr struct { - Len uint32 - Level int32 - Type int32 + Len uint32 + Level int32 + Type int32 } const ( - sizeofIovec = 0x10 - sizeofMsghdr = 0x30 + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 ) diff --git a/go-controller/vendor/golang.org/x/net/proxy/per_host.go b/go-controller/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6e3..32bdf435ec 100644 --- a/go-controller/vendor/golang.org/x/net/proxy/per_host.go +++ b/go-controller/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. } func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/go-controller/vendor/golang.org/x/net/websocket/websocket.go b/go-controller/vendor/golang.org/x/net/websocket/websocket.go index ac76165ceb..3448d20395 100644 --- a/go-controller/vendor/golang.org/x/net/websocket/websocket.go +++ b/go-controller/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. 
// // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/github.com/coder/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/go-controller/vendor/golang.org/x/oauth2/README.md b/go-controller/vendor/golang.org/x/oauth2/README.md index 781770c204..48dbb9d84c 100644 --- a/go-controller/vendor/golang.org/x/oauth2/README.md +++ b/go-controller/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/go-controller/vendor/golang.org/x/oauth2/oauth2.go b/go-controller/vendor/golang.org/x/oauth2/oauth2.go index 09f6a49b80..74f052aa9f 100644 --- a/go-controller/vendor/golang.org/x/oauth2/oauth2.go +++ b/go-controller/vendor/golang.org/x/oauth2/oauth2.go @@ -56,7 +56,7 @@ type Config struct { // the OAuth flow, after the resource owner's URLs. 
RedirectURL string - // Scope specifies optional requested permissions. + // Scopes specifies optional requested permissions. Scopes []string // authStyleCache caches which auth style to use when Endpoint.AuthStyle is diff --git a/go-controller/vendor/golang.org/x/oauth2/pkce.go b/go-controller/vendor/golang.org/x/oauth2/pkce.go index 50593b6dfe..6a95da975c 100644 --- a/go-controller/vendor/golang.org/x/oauth2/pkce.go +++ b/go-controller/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth // only. func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go b/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63d..a4ea5d14f1 100644 --- a/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/go-controller/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. 
func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/go-controller/vendor/golang.org/x/sync/errgroup/go120.go b/go-controller/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b63..0000000000 --- a/go-controller/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go b/go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434e..0000000000 --- a/go-controller/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/go-controller/vendor/golang.org/x/sys/unix/auxv.go b/go-controller/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 0000000000..37a82528f5 --- /dev/null +++ b/go-controller/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 0000000000..1200487f2e --- /dev/null +++ b/go-controller/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go b/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab8..7ca4fa12aa 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. 
The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. 
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh b/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh index ac54ecaba0..6ab02b6c31 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/go-controller/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -158,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . 
#if defined(__powerpc__) @@ -256,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -527,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2c..be8c002070 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go index f08abd434f..230a94549a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) 
//sys CloseRange(first uint, last uint, flags uint) (err error) diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af064..abc3955477 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1d..7bf5c04bb0 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), 
err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count 
int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. 
+ if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go index de3b462489..4f432bfe8f 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1240,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1325,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1546,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1618,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + 
IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1799,6 +1810,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1860,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1924,6 +1938,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -1959,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2075,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2155,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2483,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2491,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2517,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 
0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2584,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2594,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2625,6 +2651,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2881,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2890,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f @@ -2948,6 +2996,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8aa6d77c01..75207613c7 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -237,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -283,10 +300,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -321,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index da428f4253..c68acda535 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -237,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -284,10 +301,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -322,6 +342,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index bf45bfec78..a8c607ab86 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -289,10 +306,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -327,6 +347,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 71c67162b7..18563dd8d3 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,15 +109,19 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -205,6 +209,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -240,6 +245,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -280,10 +299,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 
0x40182103 @@ -318,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9476628fa0..22912cdaa9 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -238,6 +241,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -276,10 +293,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -314,6 +334,9 @@ const ( 
SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9e85f3cf0..29344eb37a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 
0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a48b68a764..20d51fb96a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ea00e8522a..321b60902a 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 91c6468717..9bacdf1e27 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -282,10 +299,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -320,6 +340,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 
SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 8cbf38d639..c224272615 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -237,6 +240,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -337,10 +354,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -375,6 +395,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + 
SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index a2df734191..6270c8ee13 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -237,6 +240,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -341,10 +358,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -379,6 +399,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 
0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 2479137923..9966c1941f 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -237,6 +240,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -341,10 +358,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -379,6 +399,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 
0x5 SO_ERROR = 0x4 diff --git a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index d265f146ee..848e5fcc42 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -273,10 +290,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -311,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3f2d644396..669b2adb80 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,12 +108,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -234,6 +237,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -345,10 +362,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -383,6 +403,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 5d8b727a1c..4834e57514 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,12 +112,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -239,6 +242,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 @@ -336,10 +353,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -422,6 +442,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git 
a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go index af30da5578..5cc1e8eb2f 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb8..c6545413c4 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid 
ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = 
int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820cb..c79aaff306 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf456..5eb450695e 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b0..05e5029744 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff 
--git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe88..38c53ec51b 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da0..31d2e71a18 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1eff..f4184a336b 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5ef..05b9962278 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 
SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a8..43a256e9e6 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a02..eea5ddfc22 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7ed..0d777bfbb1 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d54..b446365025 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ 
b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416dad..0c7d21c188 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766f..8405391698 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825bb..fcf1b790d6 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go 
b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77cd..52d15b5f9d 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d437..17c53bd9b3 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } 
+type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 0d45a941aa..2392226a74 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits 
uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go index 3a69e45496..a46abe6472 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1752,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1796,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1825,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1857,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1925,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1977,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2586,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + 
SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3533,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3794,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3834,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3842,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4023,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4110,6 +4118,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + 
HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4291,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { @@ -4637,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5409,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6064,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af468..2e5d5a4435 100644 --- a/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/go-controller/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go b/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go index 4e613cf633..3ca814f54d 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. 
func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go b/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go index 5cee9a3143..4a32543868 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -725,20 +727,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + 
EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -894,6 +888,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1685,13 +1684,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. 
func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. diff --git a/go-controller/vendor/golang.org/x/sys/windows/types_windows.go b/go-controller/vendor/golang.org/x/sys/windows/types_windows.go index 7b97a154c9..9d138de5fe 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/types_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. + PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -2203,6 +2204,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. 
+type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. 
+type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. 
diff --git a/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 4c2e1bdc01..01c0716c2c 100644 --- a/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/go-controller/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -181,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -275,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = 
modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1606,6 +1613,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1638,6 +1653,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, 
callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) @@ -2393,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2409,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { 
var _p0 uint32 if wait { diff --git a/go-controller/vendor/golang.org/x/term/README.md b/go-controller/vendor/golang.org/x/term/README.md index d03d0aefef..05ff623f94 100644 --- a/go-controller/vendor/golang.org/x/term/README.md +++ b/go-controller/vendor/golang.org/x/term/README.md @@ -4,16 +4,13 @@ This repository provides Go terminal and console support packages. -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/term`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/term`. - ## Report Issues / Send Patches This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/term. The main issue tracker for the term repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the +https://go.dev/issues. Prefix your issue with "x/term:" in the subject line, so it is easy to find. diff --git a/go-controller/vendor/golang.org/x/text/language/parse.go b/go-controller/vendor/golang.org/x/text/language/parse.go index 4d57222e77..053336e286 100644 --- a/go-controller/vendor/golang.org/x/text/language/parse.go +++ b/go-controller/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go new file mode 100644 index 0000000000..c2fe519714 --- /dev/null +++ b/go-controller/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -0,0 +1,418 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. +func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + // also support sidecar container (initContainer with restartPolicy=Always) + for _, container := range pod.Spec.InitContainers { + if container.RestartPolicy == nil || *container.RestartPolicy != v1.ContainerRestartPolicyAlways { + continue + } + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} + +// ContainerType signifies container type +type ContainerType int + +const ( + // Containers is for normal containers + 
Containers ContainerType = 1 << iota + // InitContainers is for init containers + InitContainers + // EphemeralContainers is for ephemeral containers + EphemeralContainers +) + +// AllContainers specifies that all containers be visited +const AllContainers ContainerType = InitContainers | Containers | EphemeralContainers + +// AllFeatureEnabledContainers returns a ContainerType mask which includes all container +// types except for the ones guarded by feature gate. +func AllFeatureEnabledContainers() ContainerType { + return AllContainers +} + +// ContainerVisitor is called with each container spec, and returns true +// if visiting should continue. +type ContainerVisitor func(container *v1.Container, containerType ContainerType) (shouldContinue bool) + +// Visitor is called with each object name, and returns true if visiting should continue +type Visitor func(name string) (shouldContinue bool) + +func skipEmptyNames(visitor Visitor) Visitor { + return func(name string) bool { + if len(name) == 0 { + // continue visiting + return true + } + // delegate to visitor + return visitor(name) + } +} + +// VisitContainers invokes the visitor function with a pointer to every container +// spec in the given pod spec with type set in mask. If visitor returns false, +// visiting is short-circuited. VisitContainers returns true if visiting completes, +// false if visiting was short-circuited. 
+func VisitContainers(podSpec *v1.PodSpec, mask ContainerType, visitor ContainerVisitor) bool { + if mask&InitContainers != 0 { + for i := range podSpec.InitContainers { + if !visitor(&podSpec.InitContainers[i], InitContainers) { + return false + } + } + } + if mask&Containers != 0 { + for i := range podSpec.Containers { + if !visitor(&podSpec.Containers[i], Containers) { + return false + } + } + } + if mask&EphemeralContainers != 0 { + for i := range podSpec.EphemeralContainers { + if !visitor((*v1.Container)(&podSpec.EphemeralContainers[i].EphemeralContainerCommon), EphemeralContainers) { + return false + } + } + } + return true +} + +// VisitPodSecretNames invokes the visitor function with the name of every secret +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. +func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool { + visitor = skipEmptyNames(visitor) + for _, reference := range pod.Spec.ImagePullSecrets { + if !visitor(reference.Name) { + return false + } + } + VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool { + return visitContainerSecretNames(c, visitor) + }) + var source *v1.VolumeSource + + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.AzureFile != nil: + if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { + return false + } + case source.CephFS != nil: + if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { + return false + } + case source.Cinder != nil: + if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { + return false + } + case source.FlexVolume != nil: + if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { + return false + } + case 
source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].Secret != nil { + if !visitor(source.Projected.Sources[j].Secret.Name) { + return false + } + } + } + case source.RBD != nil: + if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { + return false + } + case source.Secret != nil: + if !visitor(source.Secret.SecretName) { + return false + } + case source.ScaleIO != nil: + if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { + return false + } + case source.ISCSI != nil: + if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { + return false + } + case source.StorageOS != nil: + if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { + return false + } + case source.CSI != nil: + if source.CSI.NodePublishSecretRef != nil && !visitor(source.CSI.NodePublishSecretRef.Name) { + return false + } + } + } + return true +} + +// visitContainerSecretNames returns true unless the visitor returned false when invoked with a secret reference +func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.SecretRef != nil { + if !visitor(env.SecretRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { + if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { + return false + } + } + } + return true +} + +// VisitPodConfigmapNames invokes the visitor function with the name of every configmap +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. 
+func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool { + visitor = skipEmptyNames(visitor) + VisitContainers(&pod.Spec, AllContainers, func(c *v1.Container, containerType ContainerType) bool { + return visitContainerConfigmapNames(c, visitor) + }) + var source *v1.VolumeSource + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].ConfigMap != nil { + if !visitor(source.Projected.Sources[j].ConfigMap.Name) { + return false + } + } + } + case source.ConfigMap != nil: + if !visitor(source.ConfigMap.Name) { + return false + } + } + } + return true +} + +// visitContainerConfigmapNames returns true unless the visitor returned false when invoked with a configmap reference +func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.ConfigMapRef != nil { + if !visitor(env.ConfigMapRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { + if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { + return false + } + } + } + return true +} + +// GetContainerStatus extracts the status of container "name" from "statuses". +// It returns true if "name" exists, else returns false. +func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return v1.ContainerStatus{}, false +} + +// GetExistingContainerStatus extracts the status of container "name" from "statuses", +// It also returns if "name" exists. 
+func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { + status, _ := GetContainerStatus(statuses, name) + return status +} + +// GetIndexOfContainerStatus gets the index of status of container "name" from "statuses", +// It returns (index, true) if "name" exists, else returns (0, false). +func GetIndexOfContainerStatus(statuses []v1.ContainerStatus, name string) (int, bool) { + for i := range statuses { + if statuses[i].Name == name { + return i, true + } + } + return 0, false +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || (!c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time)) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress. +func IsPodTerminal(pod *v1.Pod) bool { + return IsPodPhaseTerminal(pod.Status.Phase) +} + +// IsPodPhaseTerminal returns true if the pod's phase is terminal. +func IsPodPhaseTerminal(phase v1.PodPhase) bool { + return phase == v1.PodFailed || phase == v1.PodSucceeded +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. 
+func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// IsContainersReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsContainersReadyConditionTrue(status v1.PodStatus) bool { + condition := GetContainersReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.ContainersReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if status == nil { + return -1, nil + } + return GetPodConditionFromList(status.Conditions, conditionType) +} + +// GetPodConditionFromList extracts the provided condition from the given list of condition and +// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present. 
+func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) { + if conditions == nil { + return -1, nil + } + for i := range conditions { + if conditions[i].Type == conditionType { + return i, &conditions[i] + } + } + return -1, nil +} + +// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual +} + +// IsRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways. +// This function is not checking if the container passed to it is indeed an init container. +// It is just checking if the container restart policy has been set to always. 
+func IsRestartableInitContainer(initContainer *v1.Container) bool { + if initContainer == nil || initContainer.RestartPolicy == nil { + return false + } + return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways +} diff --git a/go-controller/vendor/modules.txt b/go-controller/vendor/modules.txt index 9ee1b0e2b4..7636490960 100644 --- a/go-controller/vendor/modules.txt +++ b/go-controller/vendor/modules.txt @@ -197,8 +197,8 @@ github.com/juju/errors # github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 ## explicit; go 1.17 github.com/k8snetworkplumbingwg/govdpa/pkg/kvdpa -# github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha -## explicit; go 1.20 +# github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha +## explicit; go 1.23.0 github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1 github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1/apis/clientset/versioned/fake @@ -385,19 +385,19 @@ github.com/openshift/client-go/network/listers/network/v1alpha1 # github.com/openshift/custom-resource-status v1.1.2 ## explicit; go 1.12 github.com/openshift/custom-resource-status/conditions/v1 -# github.com/ovn-org/libovsdb v0.7.1-0.20240820095311-ce1951614a20 +# github.com/ovn-kubernetes/libovsdb v0.8.0 ## explicit; go 1.18 -github.com/ovn-org/libovsdb/cache -github.com/ovn-org/libovsdb/client -github.com/ovn-org/libovsdb/database -github.com/ovn-org/libovsdb/database/inmemory -github.com/ovn-org/libovsdb/database/transaction -github.com/ovn-org/libovsdb/mapper -github.com/ovn-org/libovsdb/model -github.com/ovn-org/libovsdb/ovsdb -github.com/ovn-org/libovsdb/ovsdb/serverdb -github.com/ovn-org/libovsdb/server -github.com/ovn-org/libovsdb/updates +github.com/ovn-kubernetes/libovsdb/cache +github.com/ovn-kubernetes/libovsdb/client +github.com/ovn-kubernetes/libovsdb/database 
+github.com/ovn-kubernetes/libovsdb/database/inmemory +github.com/ovn-kubernetes/libovsdb/database/transaction +github.com/ovn-kubernetes/libovsdb/mapper +github.com/ovn-kubernetes/libovsdb/model +github.com/ovn-kubernetes/libovsdb/ovsdb +github.com/ovn-kubernetes/libovsdb/ovsdb/serverdb +github.com/ovn-kubernetes/libovsdb/server +github.com/ovn-kubernetes/libovsdb/updates # github.com/pborman/uuid v1.2.0 ## explicit github.com/pborman/uuid @@ -473,8 +473,8 @@ go.opencensus.io/internal go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.28.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.36.0 +## explicit; go 1.23.0 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/ed25519 @@ -482,8 +482,8 @@ golang.org/x/crypto/ed25519 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps -# golang.org/x/net v0.30.0 -## explicit; go 1.18 +# golang.org/x/net v0.38.0 +## explicit; go 1.23.0 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/html @@ -494,6 +494,7 @@ golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/icmp golang.org/x/net/idna +golang.org/x/net/internal/httpcommon golang.org/x/net/internal/iana golang.org/x/net/internal/socket golang.org/x/net/internal/socks @@ -503,25 +504,25 @@ golang.org/x/net/ipv6 golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.23.0 -## explicit; go 1.18 +# golang.org/x/oauth2 v0.27.0 +## explicit; go 1.23.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.8.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup -# golang.org/x/sys v0.26.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc -# golang.org/x/term v0.25.0 -## 
explicit; go 1.18 +# golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.19.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/htmlindex @@ -676,7 +677,7 @@ gopkg.in/warnings.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.32.3 +# k8s.io/api v0.32.5 ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -741,7 +742,7 @@ k8s.io/api/storagemigration/v1alpha1 ## explicit; go 1.23.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.32.3 +# k8s.io/apimachinery v0.32.5 ## explicit; go 1.23.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -806,7 +807,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.32.3 +# k8s.io/client-go v0.32.5 ## explicit; go 1.23.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1173,8 +1174,9 @@ k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubernetes v1.32.3 +# k8s.io/kubernetes v1.32.6 ## explicit; go 1.23.0 +k8s.io/kubernetes/pkg/api/v1/pod k8s.io/kubernetes/pkg/apis/core k8s.io/kubernetes/pkg/probe k8s.io/kubernetes/pkg/probe/http diff --git a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml index 2b6edcaa8e..465a0aa665 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml +++ 
b/helm/ovn-kubernetes/charts/ovnkube-control-plane/templates/ovnkube-control-plane.yaml @@ -126,6 +126,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVN_HYBRID_OVERLAY_NET_CIDR value: {{ default "" .Values.global.hybridOverlayNetCidr | quote }} - name: OVN_DISABLE_SNAT_MULTIPLE_GWS diff --git a/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml b/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml index e4b0a0621a..dbf6268f6a 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-node/templates/ovnkube-node.yaml @@ -229,6 +229,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVN_ENABLE_INTERCONNECT value: {{ hasKey .Values.global "enableInterconnect" | ternary .Values.global.enableInterconnect false | quote }} - name: OVN_ENABLE_MULTI_EXTERNAL_GATEWAY diff --git a/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml b/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml index d60276308b..2cd3913633 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-single-node-zone/templates/ovnkube-single-node-zone.yaml @@ -414,6 +414,8 @@ spec: value: {{ hasKey .Values.global 
"enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVNKUBE_NODE_MGMT_PORT_NETDEV value: {{ default "" .Values.global.nodeMgmtPortNetdev | quote }} - name: OVN_EMPTY_LB_EVENTS diff --git a/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml b/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml index f692ed0524..3a437db089 100644 --- a/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml +++ b/helm/ovn-kubernetes/charts/ovnkube-zone-controller/templates/ovnkube-zone-controller.yaml @@ -313,6 +313,8 @@ spec: value: {{ hasKey .Values.global "enableMultiNetwork" | ternary .Values.global.enableMultiNetwork false | quote }} - name: OVN_NETWORK_SEGMENTATION_ENABLE value: {{ default "" .Values.global.enableNetworkSegmentation | quote }} + - name: OVN_PRE_CONF_UDN_ADDR_ENABLE + value: {{ default "" .Values.global.enablePreconfiguredUDNAddresses | quote }} - name: OVN_HYBRID_OVERLAY_NET_CIDR value: {{ default "" .Values.global.hybridOverlayNetCidr | quote }} - name: OVN_DISABLE_SNAT_MULTIPLE_GWS diff --git a/helm/ovn-kubernetes/values-multi-node-zone.yaml b/helm/ovn-kubernetes/values-multi-node-zone.yaml index 2eef44ecae..8056461256 100644 --- a/helm/ovn-kubernetes/values-multi-node-zone.yaml +++ b/helm/ovn-kubernetes/values-multi-node-zone.yaml @@ -76,6 +76,8 @@ global: enableMultiNetwork: false # -- Configure to use user defined networks (UDN) feature with ovn-kubernetes enableNetworkSegmentation: false + # -- Configure to enable workloads with preconfigured network connect to user defined networks (UDN) with ovn-kubernetes + enablePreconfiguredUDNAddresses: false # -- Configure to enable IPsec 
enableIpsec: false # -- Use SSL transport to NB/SB db and northd diff --git a/helm/ovn-kubernetes/values-single-node-zone.yaml b/helm/ovn-kubernetes/values-single-node-zone.yaml index 9747d45440..516b77220b 100644 --- a/helm/ovn-kubernetes/values-single-node-zone.yaml +++ b/helm/ovn-kubernetes/values-single-node-zone.yaml @@ -76,6 +76,8 @@ global: enableMultiNetwork: false # -- Configure to use user defined networks (UDN) feature with ovn-kubernetes enableNetworkSegmentation: false + # -- Configure to enable workloads with preconfigured network connect to user defined networks (UDN) with ovn-kubernetes + enablePreconfiguredUDNAddresses: false # -- Configure to enable IPsec enableIpsec: false # -- Use SSL transport to NB/SB db and northd diff --git a/mkdocs.yml b/mkdocs.yml index e21134af5a..f82f75c977 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -7,7 +7,7 @@ extra_css: - stylesheets/extra.css site_dir: site docs_dir: docs -copyright: The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see Trademark Usage. +copyright: Copyright © OVN-Kubernetes a Series of LF Projects, LLC. For website terms of use, trademark policy and other project policies please see LF Projects Policies. 
theme: name: material icon: @@ -42,6 +42,7 @@ plugins: - macros: #include_dir: examples j2_line_comment_prefix: "#$" + - mermaid2 - blog: # NOTE: configuration options can be found at # https://squidfunk.github.io/mkdocs-material/setup/setting-up-a-blog/ @@ -58,7 +59,11 @@ markdown_extensions: - pymdownx.details - pymdownx.highlight - pymdownx.inlinehilite - - pymdownx.superfences + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:mermaid2.fence_mermaid_custom - pymdownx.snippets: base_path: site-src check_paths: true @@ -121,7 +126,9 @@ nav: - MultiNetworkPolicies: features/multiple-networks/multi-network-policies.md - MultiNetworkRails: features/multiple-networks/multi-vtep.md - Multicast: features/multicast.md - - NetworkQoS: features/network-qos.md + - NetworkQoS: + - Overview: features/network-qos.md + - Usage Guide: features/network-qos-guide.md - LiveMigration: features/live-migration.md - HybridOverlay: features/hybrid-overlay.md - Hardware Acceleration: @@ -137,9 +144,9 @@ nav: - OVN observability: observability/ovn-observability.md - Enhancement Proposals: # - FeatureName: okeps/ - - Template: okeps/okep-4368-template.md - Localnet API: okeps/okep-5085-localnet-api.md - Network QoS: okeps/okep-4380-network-qos.md - User Defined Networks: okeps/okep-5193-user-defined-networks.md + - Preconfigured UDN Addresses: okeps/okep-5233-preconfigured-udn-addresses.md - Blog: - blog/index.md diff --git a/requirements.txt b/requirements.txt index ecb270c79d..bb1c507df1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,6 +10,7 @@ mkdocs-awesome-pages-plugin mkdocs-macros-plugin mkdocs-material mkdocs-material-extensions +mkdocs-mermaid2-plugin mike pep562 Pygments diff --git a/test/conformance/go.mod b/test/conformance/go.mod index 65883ef719..de64ed280e 100644 --- a/test/conformance/go.mod +++ b/test/conformance/go.mod @@ -1,6 +1,6 @@ module github.com/ovn-org/ovn-kubernetes/test/conformance -go 1.21 +go 
1.23.0 require ( gopkg.in/yaml.v3 v3.0.1 @@ -38,13 +38,12 @@ require ( github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.4 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/oauth2 v0.12.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/test/conformance/go.sum b/test/conformance/go.sum index 14a3443c7f..175ec601cc 100644 --- a/test/conformance/go.sum +++ b/test/conformance/go.sum @@ -23,7 +23,6 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -106,43 +105,39 @@ golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2F golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 
h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= diff --git a/test/e2e/acl_logging.go b/test/e2e/acl_logging.go index 0ea81c6f71..c5c129769b 100644 --- a/test/e2e/acl_logging.go +++ b/test/e2e/acl_logging.go @@ -9,6 +9,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" + v1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -25,7 +27,7 @@ const ( pokeInterval = 1 * time.Second ) -var _ = Describe("ACL Logging for NetworkPolicy", func() { +var _ = Describe("ACL Logging for NetworkPolicy", feature.NetworkPolicy, func() { const ( denyAllPolicyName = "default-deny-all" initialDenyACLSeverity = "alert" @@ -172,7 +174,7 @@ var _ = Describe("ACL Logging for NetworkPolicy", func() { }) }) -var _ = Describe("ACL Logging for AdminNetworkPolicy and BaselineAdminNetworkPolicy", func() { +var _ = Describe("ACL Logging for AdminNetworkPolicy and BaselineAdminNetworkPolicy", feature.AdminNetworkPolicy, feature.BaselineNetworkPolicy, func() { const ( initialDenyACLSeverity = "alert" initialAllowACLSeverity = "notice" @@ -487,7 +489,7 @@ var _ = Describe("ACL Logging for AdminNetworkPolicy and BaselineAdminNetworkPol }) }) -var _ = Describe("ACL Logging for EgressFirewall", func() { +var _ = Describe("ACL Logging for EgressFirewall", feature.EgressFirewall, func() { const ( denyAllPolicyName = "default-deny-all" initialDenyACLSeverity = "alert" diff --git a/test/e2e/containerengine/container_engine.go b/test/e2e/containerengine/container_engine.go index 12d96829b2..a9281fbb48 100644 --- a/test/e2e/containerengine/container_engine.go +++ b/test/e2e/containerengine/container_engine.go @@ -12,6 +12,16 @@ func (ce ContainerEngine) String() string { return string(ce) } +func (ce ContainerEngine) NetworkCIDRsFmt() string { + if ce == Podman { + return "{{json .Subnets }}" + } + if ce == Docker { + return "{{json .IPAM.Config }}" + } + return "" +} + const ( Docker ContainerEngine = "docker" Podman ContainerEngine = "podman" diff --git a/test/e2e/deploymentconfig/api/api.go b/test/e2e/deploymentconfig/api/api.go index 573ced8cb8..dc43e87c9b 100644 --- a/test/e2e/deploymentconfig/api/api.go +++ 
b/test/e2e/deploymentconfig/api/api.go @@ -4,6 +4,7 @@ package api // Remove when OVN-Kubernetes exposes its config via an API. type DeploymentConfig interface { OVNKubernetesNamespace() string + FRRK8sNamespace() string ExternalBridgeName() string PrimaryInterfaceName() string } diff --git a/test/e2e/deploymentconfig/configs/kind/kind.go b/test/e2e/deploymentconfig/configs/kind/kind.go index be3f35aa73..d05c6a7061 100644 --- a/test/e2e/deploymentconfig/configs/kind/kind.go +++ b/test/e2e/deploymentconfig/configs/kind/kind.go @@ -33,6 +33,10 @@ func (k kind) OVNKubernetesNamespace() string { return "ovn-kubernetes" } +func (k kind) FRRK8sNamespace() string { + return "frr-k8s-system" +} + func (k kind) ExternalBridgeName() string { return "breth0" } diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index ac9bc8fb3b..e5bbde7d42 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -17,6 +17,8 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/config" + "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/containerengine" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" @@ -558,16 +560,13 @@ func getApiAddress() string { } // IsGatewayModeLocal returns true if the gateway mode is local -func IsGatewayModeLocal() bool { - anno, err := e2ekubectl.RunKubectl("default", "get", "node", "ovn-control-plane", "-o", "template", "--template={{.metadata.annotations}}") - if err != nil { - framework.Logf("Error getting annotations: %v", err) - return false - } - framework.Logf("Annotations received: %s", anno) - isLocal := strings.Contains(anno, "local") - framework.Logf("IsGatewayModeLocal returning: %v", isLocal) - return isLocal +func IsGatewayModeLocal(cs kubernetes.Interface) bool { + ginkgo.GinkgoHelper() + node, err := e2enode.GetRandomReadySchedulableNode(context.TODO(), cs) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + l3Config, err := util.ParseNodeL3GatewayAnnotation(node) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "must get node l3 gateway annotation") + return l3Config.Mode == config.GatewayModeLocal } // restartOVNKubeNodePod restarts the ovnkube-node pod from namespace, running on nodeName @@ -713,7 +712,7 @@ var _ = ginkgo.Describe("e2e control plane", func() { } secondaryExternalContainerPort := infraprovider.Get().GetExternalContainerPort() secondaryExternalContainerSpec := infraapi.ExternalContainer{Name: "e2e-ovn-k", Image: images.AgnHost(), - Network: secondaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(secondaryExternalContainerPort), ExtPort: secondaryExternalContainerPort} + Network: secondaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(secondaryExternalContainerPort), ExtPort: secondaryExternalContainerPort} ginkgo.By("creating container on secondary provider network") secondaryExternalContainer, err = providerCtx.CreateExternalContainer(secondaryExternalContainerSpec) framework.ExpectNoError(err, "failed to create external container") @@ -1276,7 +1275,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: "e2e-ingress", Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external service", externalContainer.String()) }) @@ -1673,7 +1672,7 @@ var _ = ginkgo.Describe("e2e ingress traffic validation", func() { framework.ExpectNoError(err, "failed to get primary network") 
externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: "e2e-ingress-add-more", Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created successfully", externalContainer.Name) @@ -1835,7 +1834,7 @@ var _ = ginkgo.Describe("e2e ingress to host-networked pods traffic validation", framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be created successfully", externalContainer.Name) }) @@ -1944,7 +1943,7 @@ var _ = ginkgo.Describe("e2e br-int flow monitoring export validation", func() { primaryProviderNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get primary network") collectorExternalContainer := infraapi.ExternalContainer{Name: getContainerName(collectorPort), Image: "cloudflare/goflow", - Network: primaryProviderNetwork, Args: []string{"-kafka=false"}, ExtPort: collectorPort} + Network: primaryProviderNetwork, CmdArgs: []string{"-kafka=false"}, ExtPort: collectorPort} collectorExternalContainer, err = providerCtx.CreateExternalContainer(collectorExternalContainer) if err != nil { 
framework.Failf("failed to start flow collector container %s: %v", getContainerName(collectorPort), err) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 0359b3461b..d96b488297 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -13,13 +13,13 @@ import ( "github.com/ovn-org/ovn-kubernetes/test/e2e/diagnostics" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" "github.com/ovn-org/ovn-kubernetes/test/e2e/ipalloc" + "github.com/ovn-org/ovn-kubernetes/test/e2e/label" + clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog" "k8s.io/kubernetes/test/e2e/framework" e2econfig "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/testfiles" - "k8s.io/kubernetes/test/utils/image" ) // https://github.com/kubernetes/kubernetes/blob/v1.16.4/test/e2e/e2e_test.go#L62 @@ -55,26 +55,7 @@ var _ = ginkgo.BeforeSuite(func() { func TestMain(m *testing.M) { // Register test flags, then parse flags. handleFlags() - - if framework.TestContext.ListImages { - for _, v := range image.GetImageConfigs() { - fmt.Println(v.GetE2EImage()) - } - os.Exit(0) - } - // reset provider to skeleton as Kubernetes test framework expects a supported provider - framework.TestContext.Provider = "skeleton" - framework.AfterReadingAllFlags(&framework.TestContext) - - // TODO: Deprecating repo-root over time... instead just use gobindata_util.go , see #23987. - // Right now it is still needed, for example by - // test/e2e/framework/ingress/ingress_utils.go - // for providing the optional secret.yaml file and by - // test/e2e/framework/util.go for cluster/log-dump. 
- if framework.TestContext.RepoRoot != "" { - testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot}) - } - + ProcessTestContextAndSetupLogging() os.Exit(m.Run()) } @@ -88,5 +69,5 @@ func TestE2E(t *testing.T) { } } gomega.RegisterFailHandler(framework.Fail) - ginkgo.RunSpecs(t, "E2E Suite") + ginkgo.RunSpecs(t, "E2E Suite", label.ComponentName()) } diff --git a/test/e2e/egress_firewall.go b/test/e2e/egress_firewall.go index e5a3f8518a..abbc26b524 100644 --- a/test/e2e/egress_firewall.go +++ b/test/e2e/egress_firewall.go @@ -19,6 +19,7 @@ import ( "github.com/onsi/ginkgo/extensions/table" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,7 +35,7 @@ import ( // Validate the egress firewall policies by applying a policy and verify // that both explicitly allowed traffic and implicitly denied traffic // is properly handled as defined in the crd configuration in the test. 
-var _ = ginkgo.Describe("e2e egress firewall policy validation", func() { +var _ = ginkgo.Describe("e2e egress firewall policy validation", feature.EgressFirewall, func() { const ( svcname string = "egress-firewall-policy" egressFirewallYamlFile string = "egress-fw.yml" @@ -196,7 +197,7 @@ var _ = ginkgo.Describe("e2e egress firewall policy validation", func() { Name: externalContainerName1, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer1Port)}, + CmdArgs: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer1Port)}, ExtPort: externalContainer1Port, } externalContainer1, err = providerCtx.CreateExternalContainer(externalContainer1Spec) @@ -209,7 +210,7 @@ var _ = ginkgo.Describe("e2e egress firewall policy validation", func() { Name: externalContainerName2, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer2Port)}, + CmdArgs: []string{"netexec", fmt.Sprintf("--http-port=%d", externalContainer2Port)}, ExtPort: externalContainer2Port, } externalContainer2, err = providerCtx.CreateExternalContainer(externalContainer2Spec) diff --git a/test/e2e/egress_services.go b/test/e2e/egress_services.go index eb9cb38942..ee2fec30f4 100644 --- a/test/e2e/egress_services.go +++ b/test/e2e/egress_services.go @@ -13,6 +13,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" @@ -32,7 +33,7 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = ginkgo.Describe("EgressService", func() { +var _ = ginkgo.Describe("EgressService", feature.EgressService, func() { const ( egressServiceYAML = "egress_service.yaml" externalContainerName = "external-container-for-egress-service" @@ -84,7 +85,7 @@ var _ = ginkgo.Describe("EgressService", 
func() { framework.ExpectNoError(err, "failed to get primary provider network") externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, ExtPort: 8080, - Args: getAgnHostHTTPPortBindCMDArgs(8080)} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(8080)} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container") }) @@ -1238,7 +1239,7 @@ metadata: ginkgo.By(fmt.Sprintf("Creating container %s", net.containerName)) // Setting the --hostname here is important since later we poke the container's /hostname endpoint extContainerSecondaryNet := infraapi.ExternalContainer{Name: net.containerName, Image: images.AgnHost(), Network: network, - Args: []string{"netexec", "--http-port=8080"}, ExtPort: 8080} + CmdArgs: []string{"netexec", "--http-port=8080"}, ExtPort: 8080} extContainerSecondaryNet, err = providerCtx.CreateExternalContainer(extContainerSecondaryNet) ginkgo.By(fmt.Sprintf("Adding a listener for the shared IPv4 %s on %s", sharedIPv4, net.containerName)) out, err := infraprovider.Get().ExecExternalContainerCommand(extContainerSecondaryNet, []string{"ip", "address", "add", sharedIPv4 + "/32", "dev", "lo"}) diff --git a/test/e2e/egressip.go b/test/e2e/egressip.go index 162af8fad0..b2f75254f7 100644 --- a/test/e2e/egressip.go +++ b/test/e2e/egressip.go @@ -20,6 +20,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/types" "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -218,7 +219,7 @@ func isSupportedAgnhostForEIP(externalContainer infraapi.ExternalContainer) bool if externalContainer.Image 
!= images.AgnHost() { return false } - if !util.SliceHasStringItem(externalContainer.Args, "netexec") { + if !util.SliceHasStringItem(externalContainer.CmdArgs, "netexec") { return false } return true @@ -377,7 +378,7 @@ type egressIPs struct { Items []egressIP `json:"items"` } -var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", func(netConfigParams networkAttachmentConfigParams) { +var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", feature.EgressIP, func(netConfigParams networkAttachmentConfigParams) { //FIXME: tests for CDN are designed for single stack clusters (IPv4 or IPv6) and must choose a single IP family for dual stack clusters. // Remove this restriction and allow the tests to detect if an IP family support is available. const ( @@ -701,6 +702,7 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", func(netConfigPa if len(nodes.Items) < 3 { framework.Failf("Test requires >= 3 Ready nodes, but there are only %v nodes", len(nodes.Items)) } + netConfigParams.cidr = filterCIDRsAndJoin(f.ClientSet, netConfigParams.cidr) if isSupported, reason := isNetworkSupported(nodes, netConfigParams); !isSupported { ginkgo.Skip(reason) } @@ -752,13 +754,13 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", func(netConfigPa // attach containers to the primary network primaryTargetExternalContainerPort := infraprovider.Get().GetExternalContainerPort() primaryTargetExternalContainerSpec := infraapi.ExternalContainer{Name: targetNodeName, Image: images.AgnHost(), - Network: primaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(primaryTargetExternalContainerPort), ExtPort: primaryTargetExternalContainerPort} + Network: primaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(primaryTargetExternalContainerPort), ExtPort: primaryTargetExternalContainerPort} primaryTargetExternalContainer, err = providerCtx.CreateExternalContainer(primaryTargetExternalContainerSpec) framework.ExpectNoError(err, "failed to create 
external target container on primary network", primaryTargetExternalContainerSpec.String()) primaryDeniedExternalContainerPort := infraprovider.Get().GetExternalContainerPort() primaryDeniedExternalContainerSpec := infraapi.ExternalContainer{Name: deniedTargetNodeName, Image: images.AgnHost(), - Network: primaryProviderNetwork, Args: getAgnHostHTTPPortBindCMDArgs(primaryDeniedExternalContainerPort), ExtPort: primaryDeniedExternalContainerPort} + Network: primaryProviderNetwork, CmdArgs: getAgnHostHTTPPortBindCMDArgs(primaryDeniedExternalContainerPort), ExtPort: primaryDeniedExternalContainerPort} primaryDeniedExternalContainer, err = providerCtx.CreateExternalContainer(primaryDeniedExternalContainerSpec) framework.ExpectNoError(err, "failed to create external denied container on primary network", primaryDeniedExternalContainer.String()) @@ -789,7 +791,7 @@ var _ = ginkgo.DescribeTableSubtree("e2e egress IP validation", func(netConfigPa Name: targetSecondaryNodeName, Image: images.AgnHost(), Network: secondaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(secondaryTargetExternalContainerPort), + CmdArgs: getAgnHostHTTPPortBindCMDArgs(secondaryTargetExternalContainerPort), ExtPort: secondaryTargetExternalContainerPort, } secondaryTargetExternalContainer, err = providerCtx.CreateExternalContainer(secondaryTargetExternalContainerSpec) @@ -970,7 +972,7 @@ spec: if isClusterDefaultNetwork(netConfigParams) { pod2IP = getPodAddress(pod2Name, f.Namespace.Name) } else { - pod2IP, err = podIPsForUserDefinedPrimaryNetwork( + pod2IP, err = getPodAnnotationIPsForAttachmentByIndex( f.ClientSet, f.Namespace.Name, pod2Name, @@ -2123,7 +2125,7 @@ spec: providerPrimaryNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get providers primary network") externalContainerPrimary := infraapi.ExternalContainer{Name: "external-container-for-egressip-mtu-test", Image: images.AgnHost(), - Network: providerPrimaryNetwork, Args: 
[]string{"pause"}, ExtPort: externalContainerPrimaryPort} + Network: providerPrimaryNetwork, CmdArgs: []string{"pause"}, ExtPort: externalContainerPrimaryPort} externalContainerPrimary, err = providerCtx.CreateExternalContainer(externalContainerPrimary) framework.ExpectNoError(err, "failed to create external container: %s", externalContainerPrimary.String()) @@ -3192,13 +3194,13 @@ spec: ginkgo.Entry("L3 Primary UDN", networkAttachmentConfigParams{ name: "l3primary", topology: types.Layer3Topology, - cidr: correctCIDRFamily("30.10.0.0/16", "2014:100:200::0/60"), + cidr: joinCIDRs("30.10.0.0/16", "2014:100:200::0/60"), role: "primary", }), ginkgo.Entry("L2 Primary UDN", networkAttachmentConfigParams{ name: "l2primary", topology: types.Layer2Topology, - cidr: correctCIDRFamily("10.10.0.0/16", "2014:100:200::0/60"), + cidr: joinCIDRs("10.10.0.0/16", "2014:100:200::0/60"), role: "primary", }), ) diff --git a/test/e2e/egressqos.go b/test/e2e/egressqos.go index 4f6b282027..0d32a9a514 100644 --- a/test/e2e/egressqos.go +++ b/test/e2e/egressqos.go @@ -10,6 +10,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "golang.org/x/sync/errgroup" v1 "k8s.io/api/core/v1" @@ -19,7 +20,7 @@ import ( e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("e2e EgressQoS validation", func() { +var _ = ginkgo.Describe("e2e EgressQoS validation", feature.EgressQos, func() { const ( egressQoSYaml = "egressqos.yaml" srcPodName = "src-dscp-pod" diff --git a/test/e2e/external_gateways.go b/test/e2e/external_gateways.go index c7bf83d9f9..c3b2f12198 100644 --- a/test/e2e/external_gateways.go +++ b/test/e2e/external_gateways.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi 
"github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -41,6 +42,16 @@ const ( anyLink = "any" ) +// GatewayRemovalType defines ways to remove pod as external gateway +type GatewayRemovalType string + +const ( + GatewayUpdate GatewayRemovalType = "GatewayUpdate" + GatewayDelete GatewayRemovalType = "GatewayDelete" + GatewayDeletionTimestamp GatewayRemovalType = "GatewayDeletionTimestamp" + GatewayNotReady GatewayRemovalType = "GatewayNotReady" +) + func getOverrideNetwork() (string, string, string) { // When the env variable is specified, we use a different docker network for // containers acting as external gateways. @@ -74,7 +85,7 @@ type gatewayTestIPs struct { targetIPs []string } -var _ = ginkgo.Describe("External Gateway", func() { +var _ = ginkgo.Describe("External Gateway", feature.ExternalGateway, func() { const ( gwTCPPort = 80 @@ -133,7 +144,7 @@ var _ = ginkgo.Describe("External Gateway", func() { } externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: getContainerName(gwContainerNameTemplate, externalContainerPort), - Image: images.AgnHost(), Network: network, ExtPort: externalContainerPort, Args: []string{"pause"}} + Image: images.AgnHost(), Network: network, ExtPort: externalContainerPort, CmdArgs: []string{"pause"}} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to start external gateway test container") if network.Name() == "host" { @@ -227,7 +238,7 @@ var _ = ginkgo.Describe("External Gateway", func() { // start the container that will act as a new external gateway that the tests will be updated to use externalContainer2Port := infraprovider.Get().GetExternalContainerPort() externalContainer2 := infraapi.ExternalContainer{Name: getContainerName(gwContainerNameTemplate2, externalContainerPort), - Image: images.AgnHost(), Network: network, ExtPort: externalContainer2Port, Args: []string{"pause"}} + 
Image: images.AgnHost(), Network: network, ExtPort: externalContainer2Port, CmdArgs: []string{"pause"}} externalContainer2, err = providerCtx.CreateExternalContainer(externalContainer2) framework.ExpectNoError(err, "failed to start external gateway test container %s", getContainerName(gwContainerNameTemplate2, externalContainerPort)) if network.Name() == "host" { @@ -354,7 +365,7 @@ var _ = ginkgo.Describe("External Gateway", func() { } externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: getContainerName(gwContainerTemplate, externalContainerPort), Image: images.AgnHost(), Network: network, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to start external gateway test container %s", getContainerName(gwContainerTemplate, externalContainerPort)) if network.Name() == "host" { @@ -874,10 +885,15 @@ var _ = ginkgo.Describe("External Gateway", func() { ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - ginkgo.DescribeTable("ExternalGWPod annotation: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, deletePod bool) { + ginkgo.DescribeTable("ExternalGWPod annotation: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, removalType GatewayRemovalType) { if addresses.srcPodIP == "" || addresses.nodeIP == "" { skipper.Skipf("Skipping as pod ip / node ip are not set pod ip %s node ip %s", addresses.srcPodIP, addresses.nodeIP) } + + if removalType == GatewayNotReady { + 
recreatePodWithReadinessProbe(f, gatewayPodName2, nodes.Items[1].Name, servingNamespace, sleepCommand, nil) + } + ginkgo.By("Annotate the external gw pods to manage the src app pod namespace") for i, gwPod := range []string{gatewayPodName1, gatewayPodName2} { networkIPs := fmt.Sprintf("\"%s\"", addresses.gatewayIPs[i]) @@ -924,15 +940,9 @@ var _ = ginkgo.Describe("External Gateway", func() { totalPodConnEntries := pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil) gomega.Expect(totalPodConnEntries).To(gomega.Equal(6)) // total conntrack entries for this pod/protocol - if deletePod { - ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", gatewayPodName2, servingNamespace)) - err = f.ClientSet.CoreV1().Pods(servingNamespace).Delete(context.TODO(), gatewayPodName2, metav1.DeleteOptions{}) - framework.ExpectNoError(err, "Delete the gateway pod failed: %v", err) - // give some time to handle pod delete event - time.Sleep(5 * time.Second) - } else { - ginkgo.By("Remove second external gateway pod's routing-namespace annotation") - annotatePodForGateway(gatewayPodName2, servingNamespace, "", addresses.gatewayIPs[1], false) + cleanUpFn := handleGatewayPodRemoval(f, removalType, gatewayPodName2, servingNamespace, addresses.gatewayIPs[1], true) + if cleanUpFn != nil { + defer cleanUpFn() } // ensure the conntrack deletion tracker annotation is updated @@ -972,12 +982,20 @@ var _ = ginkgo.Describe("External Gateway", func() { gomega.Expect(podConnEntriesWithMACLabelsSet).To(gomega.Equal(0)) // we don't have any remaining gateways left gomega.Expect(totalPodConnEntries).To(gomega.Equal(4)) // 6-2 }, - ginkgo.Entry("IPV4 udp", &addressesv4, "udp", false), - ginkgo.Entry("IPV4 tcp", &addressesv4, "tcp", false), - ginkgo.Entry("IPV6 udp", &addressesv6, "udp", false), - ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp", false), - ginkgo.Entry("IPV4 udp + pod delete", &addressesv4, "udp", true), - ginkgo.Entry("IPV6 tcp + pod delete", &addressesv6, 
"tcp", true), + ginkgo.Entry("IPV4 udp + pod annotation update", &addressesv4, "udp", GatewayUpdate), + ginkgo.Entry("IPV4 tcp + pod annotation update", &addressesv4, "tcp", GatewayUpdate), + ginkgo.Entry("IPV6 udp + pod annotation update", &addressesv6, "udp", GatewayUpdate), + ginkgo.Entry("IPV6 tcp + pod annotation update", &addressesv6, "tcp", GatewayUpdate), + ginkgo.Entry("IPV4 udp + pod delete", &addressesv4, "udp", GatewayDelete), + ginkgo.Entry("IPV6 tcp + pod delete", &addressesv6, "tcp", GatewayDelete), + ginkgo.Entry("IPV4 udp + pod deletion timestamp", &addressesv4, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 tcp + pod deletion timestamp", &addressesv4, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 udp + pod deletion timestamp", &addressesv6, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 tcp + pod deletion timestamp", &addressesv6, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 udp + pod not ready", &addressesv4, "udp", GatewayNotReady), + ginkgo.Entry("IPV4 tcp + pod not ready", &addressesv4, "tcp", GatewayNotReady), + ginkgo.Entry("IPV6 udp + pod not ready", &addressesv6, "udp", GatewayNotReady), + ginkgo.Entry("IPV6 tcp + pod not ready", &addressesv6, "tcp", GatewayNotReady), ) }) @@ -1982,11 +2000,15 @@ var _ = ginkgo.Describe("External Gateway", func() { ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - ginkgo.DescribeTable("Dynamic Hop: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string) { + ginkgo.DescribeTable("Dynamic Hop: Should validate conntrack entry deletion for TCP/UDP traffic via multiple external gateways a.k.a ECMP routes", func(addresses *gatewayTestIPs, protocol string, removalType GatewayRemovalType) { if addresses.srcPodIP == "" || addresses.nodeIP == "" { skipper.Skipf("Skipping as pod ip / node ip are not set pod ip %s node ip %s", 
addresses.srcPodIP, addresses.nodeIP) } + if removalType == GatewayNotReady { + recreatePodWithReadinessProbe(f, gatewayPodName2, nodes.Items[1].Name, servingNamespace, sleepCommand, map[string]string{"name": gatewayPodName2, "gatewayPod": "true"}) + } + for i, gwPod := range []string{gatewayPodName1, gatewayPodName2} { annotateMultusNetworkStatusInPodGateway(gwPod, servingNamespace, []string{addresses.gatewayIPs[i], addresses.gatewayIPs[i]}) } @@ -2025,10 +2047,10 @@ var _ = ginkgo.Describe("External Gateway", func() { }, time.Minute, 5).Should(gomega.Equal(podConnEntriesWithMACLabelsSet)) gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) // total conntrack entries for this pod/protocol - ginkgo.By("Remove second external gateway pod's routing-namespace annotation") - p := getGatewayPod(f, servingNamespace, gatewayPodName2) - p.Labels = map[string]string{"name": gatewayPodName2} - updatePod(f, p) + cleanUpFn := handleGatewayPodRemoval(f, removalType, gatewayPodName2, servingNamespace, addresses.gatewayIPs[1], false) + if cleanUpFn != nil { + defer cleanUpFn() + } ginkgo.By("Check if conntrack entries for ECMP routes are removed for the deleted external gateway if traffic is UDP") @@ -2043,7 +2065,7 @@ var _ = ginkgo.Describe("External Gateway", func() { gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) ginkgo.By("Remove first external gateway pod's routing-namespace annotation") - p = getGatewayPod(f, servingNamespace, gatewayPodName1) + p := getGatewayPod(f, servingNamespace, gatewayPodName1) p.Labels = map[string]string{"name": gatewayPodName1} updatePod(f, p) @@ -2059,11 +2081,19 @@ var _ = ginkgo.Describe("External Gateway", func() { gomega.Expect(pokeConntrackEntries(nodeName, addresses.srcPodIP, protocol, nil)).To(gomega.Equal(totalPodConnEntries)) checkAPBExternalRouteStatus(defaultPolicyName) }, - ginkgo.Entry("IPV4 
udp", &addressesv4, "udp"), - ginkgo.Entry("IPV4 tcp", &addressesv4, "tcp"), - ginkgo.Entry("IPV6 udp", &addressesv6, "udp"), - ginkgo.Entry("IPV6 tcp", &addressesv6, "tcp")) - + ginkgo.Entry("IPV4 udp + pod annotation update", &addressesv4, "udp", GatewayUpdate), + ginkgo.Entry("IPV4 tcp + pod annotation update", &addressesv4, "tcp", GatewayUpdate), + ginkgo.Entry("IPV6 udp + pod annotation update", &addressesv6, "udp", GatewayUpdate), + ginkgo.Entry("IPV6 tcp + pod annotation update", &addressesv6, "tcp", GatewayUpdate), + ginkgo.Entry("IPV4 udp + pod deletion timestamp", &addressesv4, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 tcp + pod deletion timestamp", &addressesv4, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 udp + pod deletion timestamp", &addressesv6, "udp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV6 tcp + pod deletion timestamp", &addressesv6, "tcp", GatewayDeletionTimestamp), + ginkgo.Entry("IPV4 udp + pod not ready", &addressesv4, "udp", GatewayNotReady), + ginkgo.Entry("IPV4 tcp + pod not ready", &addressesv4, "tcp", GatewayNotReady), + ginkgo.Entry("IPV6 udp + pod not ready", &addressesv6, "udp", GatewayNotReady), + ginkgo.Entry("IPV6 tcp + pod not ready", &addressesv6, "tcp", GatewayNotReady), + ) }) // BFD Tests are dual of external gateway. 
The only difference is that they enable BFD on ovn and @@ -2892,9 +2922,9 @@ func setupGatewayContainers(f *framework.Framework, providerCtx infraapi.Context var err error externalContainer1 := infraapi.ExternalContainer{Name: getContainerName(container1Template, uint16(gwTCPPort)), - Image: externalContainerImage, Network: network, Args: []string{}, ExtPort: uint16(gwTCPPort)} + Image: externalContainerImage, Network: network, CmdArgs: []string{}, ExtPort: uint16(gwTCPPort)} externalContainer2 := infraapi.ExternalContainer{Name: getContainerName(container2Template, uint16(gwTCPPort)), - Image: externalContainerImage, Network: network, Args: []string{}, ExtPort: uint16(gwTCPPort)} + Image: externalContainerImage, Network: network, CmdArgs: []string{}, ExtPort: uint16(gwTCPPort)} gwContainers := []infraapi.ExternalContainer{externalContainer1, externalContainer2} addressesv4 := gatewayTestIPs{targetIPs: make([]string, 0)} @@ -3145,12 +3175,12 @@ func setupGatewayContainersForConntrackTest(f *framework.Framework, providerCtx addressesv6 := gatewayTestIPs{gatewayIPs: make([]string, 2)} ginkgo.By("Creating the gateway containers for the UDP test") gwExternalContainer1 := infraapi.ExternalContainer{Name: getContainerName(gwContainer1Template, 12345), - Image: images.IPerf3(), Network: network, Args: []string{}, ExtPort: 12345} + Image: images.IPerf3(), Network: network, CmdArgs: []string{}, ExtPort: 12345} gwExternalContainer1, err = providerCtx.CreateExternalContainer(gwExternalContainer1) framework.ExpectNoError(err, "failed to create external container (%s)", gwExternalContainer1) gwExternalContainer2 := infraapi.ExternalContainer{Name: getContainerName(gwContainer2Template, 12345), - Image: images.IPerf3(), Network: network, Args: []string{}, ExtPort: 12345} + Image: images.IPerf3(), Network: network, CmdArgs: []string{}, ExtPort: 12345} gwExternalContainer2, err = providerCtx.CreateExternalContainer(gwExternalContainer2) framework.ExpectNoError(err, "failed to 
create external container (%s)", gwExternalContainer2) if network.Name() == "host" { @@ -3594,3 +3624,133 @@ func resetGatewayAnnotations(f *framework.Framework) { annotation}...) } } + +func setupPodWithReadinessProbe(f *framework.Framework, podName, nodeSelector, namespace string, command []string, labels map[string]string) (*corev1.Pod, error) { + // Handle bash -c commands specially to preserve argument structure + if len(command) >= 3 && command[0] == "bash" && command[1] == "-c" { + // Extract the script part and wrap it to preserve logic + script := strings.Join(command[2:], " ") + command = []string{"bash", "-c", "touch /tmp/ready && (" + script + ")"} + } else { + // For non-bash commands, preserve their structure + var quotedArgs []string + for _, arg := range command { + // Escape single quotes and wrap in single quotes + escaped := strings.ReplaceAll(arg, "'", "'\"'\"'") + quotedArgs = append(quotedArgs, "'"+escaped+"'") + } + command = []string{"bash", "-c", "touch /tmp/ready && " + strings.Join(quotedArgs, " ")} + } + return createPod(f, podName, nodeSelector, namespace, command, labels, func(p *corev1.Pod) { + p.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + Exec: &corev1.ExecAction{ + Command: []string{"cat", "/tmp/ready"}, + }, + }, + InitialDelaySeconds: 5, + PeriodSeconds: 5, + FailureThreshold: 1, + } + }) +} + +func recreatePodWithReadinessProbe(f *framework.Framework, podName, nodeSelector, namespace string, command []string, labels map[string]string) { + ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", podName, namespace)) + err := deletePodWithWaitByName(context.TODO(), f.ClientSet, podName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Delete second external gateway pod %s from ns %s, failed: %v", podName, namespace, err)) + + ginkgo.By(fmt.Sprintf("Create second external gateway pod %s from ns %s with readiness probe", podName, namespace)) + _, 
err = setupPodWithReadinessProbe(f, podName, nodeSelector, namespace, command, labels) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Create second external gateway pod %s from ns %s with readiness probe, failed: %v", podName, namespace, err)) + gomega.Eventually(func() bool { + var p *corev1.Pod + p, err = f.ClientSet.CoreV1().Pods(namespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return false + } + for _, condition := range p.Status.Conditions { + if condition.Type == corev1.PodReady { + return condition.Status == corev1.ConditionTrue + } + } + return false + }).Should(gomega.Equal(true), fmt.Sprintf("Readiness probe for second external gateway pod %s from ns %s, failed: %v", podName, namespace, err)) +} + +func handleGatewayPodRemoval(f *framework.Framework, removalType GatewayRemovalType, gatewayPodName, servingNamespace, gatewayIP string, isAnnotated bool) func() { + var err error + switch removalType { + case GatewayDelete: + ginkgo.By(fmt.Sprintf("Delete second external gateway pod %s from ns %s", gatewayPodName, servingNamespace)) + err := deletePodWithWaitByName(context.TODO(), f.ClientSet, gatewayPodName, servingNamespace) + framework.ExpectNoError(err, "Delete the gateway pod failed: %v", err) + return nil + case GatewayUpdate: + if isAnnotated { + ginkgo.By("Remove second external gateway pod's routing-namespace annotation") + annotatePodForGateway(gatewayPodName, servingNamespace, "", gatewayIP, false) + return nil + } + + ginkgo.By("Updating external gateway pod labels") + p := getGatewayPod(f, servingNamespace, gatewayPodName) + p.Labels = map[string]string{"name": gatewayPodName} + updatePod(f, p) + return nil + case GatewayDeletionTimestamp: + ginkgo.By("Setting finalizer then deleting external gateway pod with grace period to set deletion timestamp") + p := getGatewayPod(f, servingNamespace, gatewayPodName) + p.Finalizers = append(p.Finalizers, "k8s.ovn.org/external-gw-pod-finalizer") + 
updatePod(f, p) + gomega.Eventually(func() bool { + p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) + if err != nil { + return false + } + return strings.Contains(strings.Join(p.GetFinalizers(), ","), "k8s.ovn.org/external-gw-pod-finalizer") + }).Should(gomega.Equal(true), fmt.Sprintf("Update second external gateway pod %s from ns %s with finalizer, failed: %v", gatewayPodName, servingNamespace, err)) + + p = getGatewayPod(f, servingNamespace, gatewayPodName) + err = e2epod.DeletePodWithGracePeriod(context.Background(), f.ClientSet, p, 1000) + framework.ExpectNoError(err, fmt.Sprintf("unable to delete pod with grace period: %s, err: %v", p.Name, err)) + + gomega.Eventually(func() bool { + p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) + if err != nil { + return false + } + return p.DeletionTimestamp != nil + }).Should(gomega.BeTrue(), fmt.Sprintf("Gateway pod %s in ns %s should have deletion timestamp, failed: %v", gatewayPodName, servingNamespace, err)) + + // return a function to remove the finalizer + return func() { + p = getGatewayPod(f, servingNamespace, gatewayPodName) + p.Finalizers = []string{} + updatePod(f, p) + } + case GatewayNotReady: + ginkgo.By("Remove /tmp/ready in external gateway pod so that readiness probe fails") + _, err = e2ekubectl.RunKubectl(servingNamespace, "exec", gatewayPodName, "--", "rm", "/tmp/ready") + framework.ExpectNoError(err, fmt.Sprintf("unable to remove /tmp/ready in pod: %s, err: %v", gatewayPodName, err)) + gomega.Eventually(func() bool { + var p *corev1.Pod + p, err = f.ClientSet.CoreV1().Pods(servingNamespace).Get(context.Background(), gatewayPodName, metav1.GetOptions{}) + if err != nil { + return false + } + podReadyStatus := corev1.ConditionTrue + for _, condition := range p.Status.Conditions { + if condition.Type == corev1.PodReady { + podReadyStatus = condition.Status + break + } + 
} + return podReadyStatus == corev1.ConditionFalse + }).WithTimeout(5*time.Minute).Should(gomega.Equal(true), fmt.Sprintf("Mark second external gateway pod %s from ns %s not ready, failed: %v", gatewayPodName, servingNamespace, err)) + return nil + default: + framework.Failf("unexpected GatewayRemovalType passed: %s", removalType) + return nil + } +} diff --git a/test/e2e/feature/features.go b/test/e2e/feature/features.go new file mode 100644 index 0000000000..e7c3920477 --- /dev/null +++ b/test/e2e/feature/features.go @@ -0,0 +1,32 @@ +package feature + +import ( + "github.com/onsi/ginkgo/v2" + "github.com/ovn-org/ovn-kubernetes/test/e2e/label" +) + +var ( + Service = New("Service") + NetworkPolicy = New("NetworkPolicy") + AdminNetworkPolicy = New("AdminNetworkPolicy") + BaselineNetworkPolicy = New("BaselineNetworkPolicy") + NetworkSegmentation = New("NetworkSegmentation") + EgressIP = New("EgressIP") + EgressService = New("EgressService") + EgressFirewall = New("EgressFirewall") + EgressQos = New("EgressQos") + ExternalGateway = New("ExternalGateway") + DisablePacketMTUCheck = New("DisablePacketMTUCheck") + VirtualMachineSupport = New("VirtualMachineSupport") + Interconnect = New("Interconnect") + Multicast = New("Multicast") + MultiHoming = New("MultiHoming") + NodeIPMACMigration = New("NodeIPMACMigration") + OVSCPUPin = New("OVSCPUPin") + RouteAdvertisements = New("RouteAdvertisements") + Unidle = New("Unidle") +) + +func New(name string) ginkgo.Labels { + return label.New("Feature", name).GinkgoLabel() +} diff --git a/test/e2e/gateway_mtu.go b/test/e2e/gateway_mtu.go index 386ecba5d3..ec3b3b48d9 100644 --- a/test/e2e/gateway_mtu.go +++ b/test/e2e/gateway_mtu.go @@ -5,12 +5,13 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("Check whether gateway-mtu-support annotation on node is set based 
on disable-pkt-mtu-check value", func() { +var _ = ginkgo.Describe("Check whether gateway-mtu-support annotation on node is set based on disable-pkt-mtu-check value", feature.DisablePacketMTUCheck, func() { var nodes *v1.NodeList f := wrappedTestFramework("gateway-mtu-support") diff --git a/test/e2e/go.mod b/test/e2e/go.mod index 6a865f71ee..d9d67fb0c4 100644 --- a/test/e2e/go.mod +++ b/test/e2e/go.mod @@ -6,18 +6,18 @@ toolchain go1.23.6 require ( github.com/google/go-cmp v0.6.0 - github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha + github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/pkg/errors v0.9.1 - golang.org/x/sync v0.8.0 - k8s.io/api v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/client-go v0.32.3 + golang.org/x/sync v0.12.0 + k8s.io/api v0.32.5 + k8s.io/apimachinery v0.32.5 + k8s.io/client-go v0.32.5 k8s.io/klog v1.0.0 - k8s.io/kubernetes v1.32.3 + k8s.io/kubernetes v1.32.6 k8s.io/pod-security-admission v0.32.3 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) @@ -145,13 +145,13 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.28.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect 
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect @@ -192,7 +192,7 @@ require ( require ( github.com/containernetworking/plugins v1.2.0 github.com/coreos/butane v0.18.0 - github.com/docker/docker v26.1.4+incompatible + github.com/docker/docker v26.1.5+incompatible github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f github.com/onsi/ginkgo v1.16.5 github.com/openshift-kni/k8sreporter v1.0.6 diff --git a/test/e2e/go.sum b/test/e2e/go.sum index 6fee7ac542..900d7aa612 100644 --- a/test/e2e/go.sum +++ b/test/e2e/go.sum @@ -122,8 +122,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= -github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= +github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -331,8 +331,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/k8snetworkplumbingwg/govdpa v0.1.5-0.20230926073613-07c1031aea47 h1:iSncnlC+rtlNOIpPa3fbqQMhpTscGJIlkiWaPl1VcS4= github.com/k8snetworkplumbingwg/govdpa 
v0.1.5-0.20230926073613-07c1031aea47/go.mod h1:SPaDIyUmwN03Bgn0u/mhoiE4o/+koeKh11VUsdsUX0U= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha h1:ss+EP77GlQmh90hGKpnAG4Q3VVxRlB7GoncemaPtO4g= -github.com/k8snetworkplumbingwg/ipamclaims v0.4.0-alpha/go.mod h1:qlR+sKxQ2OGfwhFCuXSd7rJ/GgC38vQBeHKQ7f2YnpI= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha h1:b3iHeks/KTzhG2dNanaUZcFEJwJbYBZY16jxCaVv9i8= +github.com/k8snetworkplumbingwg/ipamclaims v0.5.0-alpha/go.mod h1:MGaMX1tJ7MlHDee4/xmqp3guQh+eDiuCLAauqD9K11Q= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1 h1:Egj1hEVYNXWFlKpgzAXxe/2o8VNiVcAJLrKzlinILQo= github.com/k8snetworkplumbingwg/multi-networkpolicy v1.0.1/go.mod h1:kEJ4WM849yNmXekuSXLRwb+LaZ9usC06O8JgoAIq+f4= github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.6.0 h1:BT3ghAY0q7lWib9rz+tVXDFkm27dJV6SLCn7TunZwo4= @@ -581,8 +581,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -665,8 +665,8 @@ golang.org/x/net 
v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -676,8 +676,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -690,8 +690,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -752,15 +752,15 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -771,8 +771,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -986,19 +986,19 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.22.7/go.mod h1:7hejA1BgBEiSsWljUyRkIjj+AISXO16IwsaDgFjJsQE= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/api v0.32.5 h1:uqjjsYo1kTJr5NIcoIaP9F+TgXgADH7nKQx91FDAhtk= +k8s.io/api v0.32.5/go.mod h1:bXXFU3fGCZ/eFMZvfHZC69PeGbXEL4zzjuPVzOxHF64= k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY= k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss= k8s.io/apimachinery v0.22.7/go.mod h1:ZvVLP5iLhwVFg2Yx9Gh5W0um0DUauExbRhe+2Z8I1EU= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.32.5 h1:6We3aJ6crC0ap8EhsEXcgX3LpI6SEjubpiOMXLROwPM= +k8s.io/apimachinery v0.32.5/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= k8s.io/client-go v0.22.7/go.mod h1:pGU/tWSzzvsYT7M3npHhoZ3Jh9qJTTIvFvDtWuW31dw= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/client-go v0.32.5 h1:huFmQMzgWu0z4kbWsuZci+Gt4Fo72I4CcrvhToZ/Qp0= +k8s.io/client-go v0.32.5/go.mod h1:Qchw6f9WIVrur7DKojAHpRgGLcANT0RLIvF39Jz58xA= k8s.io/cloud-provider v0.32.3 h1:WC7KhWrqXsU4b0E4tjS+nBectGiJbr1wuc1TpWXvtZM= k8s.io/cloud-provider v0.32.3/go.mod h1:/fwBfgRPuh16n8vLHT+PPT+Bc4LAEaJYj38opO2wsYY= k8s.io/code-generator v0.22.7/go.mod 
h1:iOZwYADSgFPNGWfqHFfg1V0TNJnl1t0WyZluQp4baqU= @@ -1043,8 +1043,8 @@ k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI= k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg= k8s.io/kubelet v0.32.3 h1:B9HzW4yB67flx8tN2FYuDwZvxnmK3v5EjxxFvOYjmc8= k8s.io/kubelet v0.32.3/go.mod h1:yyAQSCKC+tjSlaFw4HQG7Jein+vo+GeKBGdXdQGvL1U= -k8s.io/kubernetes v1.32.3 h1:2A58BlNME8NwsMawmnM6InYo3Jf35Nw5G79q46kXwoA= -k8s.io/kubernetes v1.32.3/go.mod h1:GvhiBeolvSRzBpFlgM0z/Bbu3Oxs9w3P6XfEgYaMi8k= +k8s.io/kubernetes v1.32.6 h1:tp1gRjOqZjaoFBek5PN6eSmODdS1QRrH5UKiFP8ZByg= +k8s.io/kubernetes v1.32.6/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY= k8s.io/mount-utils v0.32.3 h1:ZPXXHblfBhYP89OnaozpFg9Ojl6HhDfxBLcdWNkaxW8= k8s.io/mount-utils v0.32.3/go.mod h1:Kun5c2svjAPx0nnvJKYQWhfeNW+O0EpzHgRhDcYoSY0= k8s.io/pod-security-admission v0.32.3 h1:scV0PQc3PdD6sXOMHukPZOCzGCGZeVN5z999gHBpkOc= diff --git a/test/e2e/infraprovider/api/api.go b/test/e2e/infraprovider/api/api.go index 5ef104b7f3..1d2d3466fb 100644 --- a/test/e2e/infraprovider/api/api.go +++ b/test/e2e/infraprovider/api/api.go @@ -4,6 +4,8 @@ import ( "errors" "fmt" "strings" + + "k8s.io/kubernetes/test/e2e/framework" ) // Provider represents the infrastructure provider @@ -37,6 +39,21 @@ type Provider interface { GetK8HostPort() uint16 // supported K8 host ports } +// Underlay represents the configuration for an underlay network. +// Note: The physical network referenced by PhysicalNetworkName must be pre-created and available. +type Underlay struct { + // PhysicalNetworkName is the name of the pre-created physical network to use. + PhysicalNetworkName string + // LogicalNetworkName is the logical network name to be used. + LogicalNetworkName string + // BridgeName is the name of the bridge associated with the underlay. + BridgeName string + // PortName is the name of the port on the bridge. + PortName string + // VlanID is the VLAN identifier for the underlay network. 
+ VlanID int +} + type Context interface { CreateExternalContainer(container ExternalContainer) (ExternalContainer, error) DeleteExternalContainer(container ExternalContainer) error @@ -46,6 +63,7 @@ type Context interface { AttachNetwork(network Network, instance string) (NetworkInterface, error) DetachNetwork(network Network, instance string) error GetAttachedNetworks() (Networks, error) + SetupUnderlay(f *framework.Framework, underlay Underlay) error AddCleanUpFn(func() error) } @@ -164,13 +182,15 @@ func (n NetworkInterface) GetMAC() string { } type ExternalContainer struct { - Name string - Image string - Network Network - Args []string - ExtPort uint16 - IPv4 string - IPv6 string + Name string + Image string + Network Network + Entrypoint string + CmdArgs []string + ExtPort uint16 + IPv4 string + IPv6 string + RuntimeArgs []string } func (ec ExternalContainer) GetName() string { @@ -208,7 +228,7 @@ func (ec ExternalContainer) IsIPv6() bool { } func (ec ExternalContainer) String() string { - str := fmt.Sprintf("Name: %q, Image: %q, Network: %q, Command: %q", ec.Name, ec.Image, ec.Network, strings.Join(ec.Args, " ")) + str := fmt.Sprintf("Name: %q, Image: %q, Network: %q, RuntimeArgs: %q, Command: %q", ec.Name, ec.Image, ec.Network, strings.Join(ec.RuntimeArgs, " "), strings.Join(ec.CmdArgs, " ")) if ec.IsIPv4() { str = fmt.Sprintf("%s, IPv4 address: %q", str, ec.GetIPv4()) } @@ -229,9 +249,6 @@ func (ec ExternalContainer) IsValidPreCreateContainer() (bool, error) { if ec.Network.String() == "" { errs = append(errs, errors.New("network is not set")) } - if ec.ExtPort == 0 { - errs = append(errs, errors.New("port is not set")) - } if len(errs) == 0 { return true, nil } diff --git a/test/e2e/infraprovider/providers/kind/kind.go b/test/e2e/infraprovider/providers/kind/kind.go index 9e1fe63e47..8c068c7411 100644 --- a/test/e2e/infraprovider/providers/kind/kind.go +++ b/test/e2e/infraprovider/providers/kind/kind.go @@ -13,10 +13,12 @@ import ( 
"github.com/onsi/ginkgo/v2" "github.com/ovn-org/ovn-kubernetes/test/e2e/containerengine" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/portalloc" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" utilnet "k8s.io/utils/net" @@ -145,9 +147,13 @@ func (c *contextKind) createExternalContainer(container api.ExternalContainer) ( return container, fmt.Errorf("container %s already exists", container.Name) } cmd := []string{"run", "-itd", "--privileged", "--name", container.Name, "--network", container.Network.Name(), "--hostname", container.Name} + if container.Entrypoint != "" { + cmd = append(cmd, "--entrypoint", container.Entrypoint) + } + cmd = append(cmd, container.RuntimeArgs...) cmd = append(cmd, container.Image) - if len(container.Args) > 0 { - cmd = append(cmd, container.Args...) + if len(container.CmdArgs) > 0 { + cmd = append(cmd, container.CmdArgs...) 
} else { if images.AgnHost() == container.Image { cmd = append(cmd, "pause") @@ -359,6 +365,83 @@ func (c *contextKind) getAttachedNetworks() (api.Networks, error) { return attachedNetworks, nil } +func (c *contextKind) SetupUnderlay(f *framework.Framework, underlay api.Underlay) error { + if underlay.LogicalNetworkName == "" { + return fmt.Errorf("underlay logical network name must be set") + } + + if underlay.PhysicalNetworkName == "" { + underlay.PhysicalNetworkName = "underlay" + } + + if underlay.BridgeName == "" { + underlay.BridgeName = secondaryBridge + } + + const ( + ovsKubeNodeLabel = "app=ovnkube-node" + ) + + ovsPodList, err := f.ClientSet.CoreV1().Pods(deploymentconfig.Get().OVNKubernetesNamespace()).List( + context.Background(), + metav1.ListOptions{LabelSelector: ovsKubeNodeLabel}, + ) + if err != nil { + return fmt.Errorf("failed to list OVS pods with label %q at namespace %q: %w", ovsKubeNodeLabel, deploymentconfig.Get().OVNKubernetesNamespace(), err) + } + + if len(ovsPodList.Items) == 0 { + return fmt.Errorf("no pods with label %q in namespace %q", ovsKubeNodeLabel, deploymentconfig.Get().OVNKubernetesNamespace()) + } + for _, ovsPod := range ovsPodList.Items { + if underlay.BridgeName != deploymentconfig.Get().ExternalBridgeName() { + underlayInterface, err := getNetworkInterface(ovsPod.Spec.NodeName, underlay.PhysicalNetworkName) + if err != nil { + return fmt.Errorf("failed to get underlay interface for network %s on node %s: %w", underlay.PhysicalNetworkName, ovsPod.Spec.NodeName, err) + } + c.AddCleanUpFn(func() error { + if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName); err != nil { + return fmt.Errorf("failed to remove OVS bridge %s for pod %s/%s during cleanup: %w", underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + return nil + }) + if err := ensureOVSBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName); err != nil { + return fmt.Errorf("failed to add OVS bridge %s for pod %s/%s: %w", 
underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + + if err := ovsAttachPortToBridge(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName, underlayInterface.InfName); err != nil { + return fmt.Errorf("failed to attach port %s to bridge %s for pod %s/%s: %w", underlayInterface.InfName, underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + if underlay.VlanID > 0 { + if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, underlay.BridgeName, underlayInterface.InfName, underlay.VlanID); err != nil { + return fmt.Errorf("failed to enable VLAN %d on port %s for bridge %s for pod %s/%s: %w", underlay.VlanID, underlayInterface.InfName, underlay.BridgeName, ovsPod.Namespace, ovsPod.Name, err) + } + } + } + c.AddCleanUpFn(func() error { + if err := configureBridgeMappings( + ovsPod.Namespace, + ovsPod.Name, + defaultNetworkBridgeMapping(), + ); err != nil { + return fmt.Errorf("failed to restore default bridge mappings for pod %s/%s during cleanup: %w", ovsPod.Namespace, ovsPod.Name, err) + } + return nil + }) + + if err := configureBridgeMappings( + ovsPod.Namespace, + ovsPod.Name, + defaultNetworkBridgeMapping(), + bridgeMapping(underlay.LogicalNetworkName, underlay.BridgeName), + ); err != nil { + return fmt.Errorf("failed to configure bridge mappings for pod %s/%s for logical network %s to bridge %s: %w", ovsPod.Namespace, ovsPod.Name, underlay.LogicalNetworkName, underlay.BridgeName, err) + } + } + return nil + +} + func (c *contextKind) AddCleanUpFn(cleanUpFn func() error) { c.Lock() defer c.Unlock() @@ -414,14 +497,13 @@ func (c *contextKind) cleanUp() error { const ( nameFormat = "{{.Name}}" - inspectNetworkIPAMJSON = "{{json .IPAM.Config }}" - inspectNetworkIPv4GWKeyStr = "{{ .NetworkSettings.Networks.%s.Gateway }}" - inspectNetworkIPv4AddrKeyStr = "{{ .NetworkSettings.Networks.%s.IPAddress }}" - inspectNetworkIPv4PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.IPPrefixLen }}" - inspectNetworkIPv6GWKeyStr = "{{ 
.NetworkSettings.Networks.%s.IPv6Gateway }}" - inspectNetworkIPv6AddrKeyStr = "{{ .NetworkSettings.Networks.%s.GlobalIPv6Address }}" - inspectNetworkIPv6PrefixKeyStr = "{{ .NetworkSettings.Networks.%s.GlobalIPv6PrefixLen }}" - inspectNetworkMACKeyStr = "{{ .NetworkSettings.Networks.%s.MacAddress }}" + inspectNetworkIPv4GWKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .Gateway }}{{ end }}" + inspectNetworkIPv4AddrKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPAddress }}{{ end }}" + inspectNetworkIPv4PrefixKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPPrefixLen }}{{ end }}" + inspectNetworkIPv6GWKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .IPv6Gateway }}{{ end }}" + inspectNetworkIPv6AddrKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .GlobalIPv6Address }}{{ end }}" + inspectNetworkIPv6PrefixKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .GlobalIPv6PrefixLen }}{{ end }}" + inspectNetworkMACKeyStr = "{{ with index .NetworkSettings.Networks %q }}{{ .MacAddress }}{{ end }}" inspectNetworkContainersKeyStr = "{{ range $key, $value := .Containers }}{{ printf \"%s\\n\" $value.Name}}{{ end }}'" emptyValue = "" ) @@ -437,7 +519,7 @@ func isNetworkAttachedToContainer(networkName, containerName string) bool { func doesContainerNameExist(name string) bool { // check if it is present before retrieving logs - stdOut, err := exec.Command(containerengine.Get().String(), "ps", "-f", fmt.Sprintf("Name=^%s$", name), "-q").CombinedOutput() + stdOut, err := exec.Command(containerengine.Get().String(), "ps", "-f", fmt.Sprintf("name=^%s$", name), "-q").CombinedOutput() if err != nil { panic(fmt.Sprintf("failed to check if external container (%s) exists: %v (%s)", name, err, stdOut)) } @@ -466,13 +548,16 @@ func getNetwork(networkName string) (containerEngineNetwork, error) { return n, api.NotFound } configs := make([]containerEngineNetworkConfig, 0, 1) - dataBytes, err := exec.Command(containerengine.Get().String(), 
"network", "inspect", "-f", inspectNetworkIPAMJSON, networkName).CombinedOutput() + + ce := containerengine.Get() + netConfFmt := ce.NetworkCIDRsFmt() + dataBytes, err := exec.Command(ce.String(), "network", "inspect", "-f", netConfFmt, networkName).CombinedOutput() if err != nil { return n, fmt.Errorf("failed to extract network %q data: %v", networkName, err) } dataBytes = []byte(strings.Trim(string(dataBytes), "\n")) if err = json.Unmarshal(dataBytes, &configs); err != nil { - return n, fmt.Errorf("failed to unmarshall network %q configuration using network inspect -f %q: %v", networkName, inspectNetworkIPAMJSON, err) + return n, fmt.Errorf("failed to unmarshall network %q configuration using network inspect -f %q: %v", networkName, netConfFmt, err) } if len(configs) == 0 { return n, fmt.Errorf("failed to find any IPAM configuration for network %s", networkName) diff --git a/test/e2e/infraprovider/providers/kind/ovs.go b/test/e2e/infraprovider/providers/kind/ovs.go new file mode 100644 index 0000000000..337ae4e702 --- /dev/null +++ b/test/e2e/infraprovider/providers/kind/ovs.go @@ -0,0 +1,93 @@ +package kind + +import ( + "fmt" + "strings" + "time" + + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + + e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" +) + +const ( + secondaryBridge = "ovsbr1" +) + +func ensureOVSBridge(podNamespace, podName string, bridgeName string) error { + cmd := fmt.Sprintf("ovs-vsctl br-exists %[1]s || ovs-vsctl add-br %[1]s", bridgeName) + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) + } + return nil +} + +func removeOVSBridge(podNamespace, podName string, bridgeName string) error { + cmd := fmt.Sprintf("if ovs-vsctl br-exists %[1]s; then ovs-vsctl del-br %[1]s; fi", bridgeName) + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, 
time.Second*5); err != nil { + return fmt.Errorf("failed to remove ovs bridge %q: %v", bridgeName, err) + } + return nil +} + +func ovsAttachPortToBridge(podNamespace, podName string, bridgeName string, portName string) error { + cmd := fmt.Sprintf("ovs-vsctl list port %[2]s || ovs-vsctl add-port %[1]s %[2]s", bridgeName, portName) + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to addadd port %s from OVS bridge %s: %v", portName, bridgeName, err) + } + return nil +} + +func ovsEnableVLANAccessPort(podNamespace, podName string, bridgeName string, portName string, vlanID int) error { + cmd := fmt.Sprintf("ovs-vsctl set port %[1]s tag=%[2]d vlan_mode=access", portName, vlanID) + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to enable vlan access port %s from OVS bridge %s: %v", portName, bridgeName, err) + } + return nil +} + +type BridgeMapping struct { + physnet string + ovsBridge string +} + +func (bm BridgeMapping) String() string { + return fmt.Sprintf("%s:%s", bm.physnet, bm.ovsBridge) +} + +type BridgeMappings []BridgeMapping + +func (bms BridgeMappings) String() string { + return strings.Join(Map(bms, func(bm BridgeMapping) string { return bm.String() }), ",") +} + +func Map[T, V any](items []T, fn func(T) V) []V { + result := make([]V, len(items)) + for i, t := range items { + result[i] = fn(t) + } + return result +} + +func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMapping) error { + mappingsString := fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", BridgeMappings(mappings).String()) + cmd := strings.Join([]string{"ovs-vsctl", "set", "open", ".", mappingsString}, " ") + if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { + return fmt.Errorf("failed to configure 
bridge mappings %q: %v", mappingsString, err) + } + return nil +} + +func defaultNetworkBridgeMapping() BridgeMapping { + return BridgeMapping{ + physnet: "physnet", + ovsBridge: deploymentconfig.Get().ExternalBridgeName(), + } +} + +func bridgeMapping(physnet, ovsBridge string) BridgeMapping { + return BridgeMapping{ + physnet: physnet, + ovsBridge: ovsBridge, + } +} diff --git a/test/e2e/kubevirt.go b/test/e2e/kubevirt.go index aa0a6a246c..67ab2e290a 100644 --- a/test/e2e/kubevirt.go +++ b/test/e2e/kubevirt.go @@ -8,7 +8,6 @@ import ( "net" "net/netip" "os" - "os/exec" "strings" "sync" "time" @@ -24,6 +23,7 @@ import ( "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/diagnostics" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -49,7 +49,7 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" testutils "k8s.io/kubernetes/test/utils" utilnet "k8s.io/utils/net" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" butaneconfig "github.com/coreos/butane/config" @@ -97,11 +97,12 @@ func newControllerRuntimeClient() (crclient.Client, error) { }) } -var _ = Describe("Kubevirt Virtual Machines", func() { +var _ = Describe("Kubevirt Virtual Machines", feature.VirtualMachineSupport, func() { var ( fr = wrappedTestFramework("kv-live-migration") d = diagnostics.New(fr) crClient crclient.Client + virtClient *kubevirt.Client namespace string iperf3DefaultPort = int32(5201) tcpServerPort = int32(9900) @@ -299,7 +300,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { output := "" Eventually(func() error { var err error - output, err = kubevirt.RunCommand(vmi, fmt.Sprintf("curl http://%s", 
net.JoinHostPort(podIP, "8000")), polling) + output, err = virtClient.RunCommand(vmi, fmt.Sprintf("curl http://%s", net.JoinHostPort(podIP, "8000")), polling) return err }). WithPolling(polling). @@ -315,7 +316,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { polling := 15 * time.Second for podName, serverPodIPs := range serverPodIPsByName { for _, serverPodIP := range serverPodIPs { - output, err := kubevirt.RunCommand(vmi, fmt.Sprintf("iperf3 -t 0 -c %[2]s --logfile /tmp/%[1]s_%[2]s_iperf3.log &", podName, serverPodIP), polling) + output, err := virtClient.RunCommand(vmi, fmt.Sprintf("iperf3 -t 0 -c %[2]s --logfile /tmp/%[1]s_%[2]s_iperf3.log &", podName, serverPodIP), polling) if err != nil { return fmt.Errorf("%s: %w", output, err) } @@ -360,7 +361,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { for _, podIP := range podIPs { iperfLogFile := fmt.Sprintf("/tmp/%s_%s_iperf3.log", podName, podIP) execFn := func(cmd string) (string, error) { - return kubevirt.RunCommand(vmi, cmd, 2*time.Second) + return virtClient.RunCommand(vmi, cmd, 2*time.Second) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -392,10 +393,10 @@ var _ = Describe("Kubevirt Virtual Machines", func() { return nil } - startNorthSouthIngressIperfTraffic = func(containerName string, addresses []string, port int32, stage string) error { + startNorthSouthIngressIperfTraffic = func(container infraapi.ExternalContainer, addresses []string, port int32, stage string) error { GinkgoHelper() execFn := func(cmd string) (string, error) { - return infraprovider.Get().ExecExternalContainerCommand(infraapi.ExternalContainer{Name: containerName}, []string{"bash", "-c", cmd}) + return infraprovider.Get().ExecExternalContainerCommand(container, []string{"bash", "-c", cmd}) } return startNorthSouthIperfTraffic(execFn, addresses, port, "ingress", stage) } @@ -403,18 +404,18 @@ var _ = Describe("Kubevirt Virtual Machines", func() { startNorthSouthEgressIperfTraffic = func(vmi 
*kubevirtv1.VirtualMachineInstance, addresses []string, port int32, stage string) error { GinkgoHelper() execFn := func(cmd string) (string, error) { - return kubevirt.RunCommand(vmi, cmd, 5*time.Second) + return virtClient.RunCommand(vmi, cmd, 5*time.Second) } return startNorthSouthIperfTraffic(execFn, addresses, port, "egress", stage) } - checkNorthSouthIngressIperfTraffic = func(containerName string, addresses []string, port int32, stage string) { + checkNorthSouthIngressIperfTraffic = func(container infraapi.ExternalContainer, addresses []string, port int32, stage string) { GinkgoHelper() Expect(addresses).NotTo(BeEmpty()) for _, ip := range addresses { iperfLogFile := fmt.Sprintf("/tmp/ingress_test_%s_%d_iperf3.log", ip, port) execFn := func(cmd string) (string, error) { - return infraprovider.Get().ExecExternalContainerCommand(infraapi.ExternalContainer{Name: containerName}, []string{"bash", "-c", cmd}) + return infraprovider.Get().ExecExternalContainerCommand(container, []string{"bash", "-c", cmd}) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -430,7 +431,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { for _, ip := range addresses { iperfLogFile := fmt.Sprintf("/tmp/egress_test_%s_%d_iperf3.log", ip, port) execFn := func(cmd string) (string, error) { - return kubevirt.RunCommand(vmi, cmd, 5*time.Second) + return virtClient.RunCommand(vmi, cmd, 5*time.Second) } checkIperfTraffic(iperfLogFile, execFn, stage) } @@ -445,7 +446,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { continue } cmd := fmt.Sprintf("ping -c 3 -W 2 %s", ip) - stdout, err := kubevirt.RunCommand(vmi, cmd, 5*time.Second) + stdout, err := virtClient.RunCommand(vmi, cmd, 5*time.Second) Expect(err).NotTo(HaveOccurred()) Expect(stdout).To(ContainSubstring(" 0% packet loss")) } @@ -471,7 +472,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { networkStatuses, err = podNetworkStatus(pod, networkStatusPredicate) return networkStatuses, err }). 
- WithTimeout(5 * time.Second). + WithTimeout(15 * time.Second). WithPolling(200 * time.Millisecond). Should(HaveLen(1)) for _, ip := range networkStatuses[0].IPs { @@ -513,7 +514,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { step = by(vmName, stage+": Check n/s tcp traffic") output := "" Eventually(func() error { - output, err = kubevirt.RunCommand(vmi, "curl -kL https://kubernetes.default.svc.cluster.local", polling) + output, err = virtClient.RunCommand(vmi, "curl -kL https://kubernetes.default.svc.cluster.local", polling) return err }). WithPolling(polling). @@ -729,7 +730,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { addressByFamily = func(familyFn func(iface kubevirt.Interface) []kubevirt.Address, vmi *kubevirtv1.VirtualMachineInstance) func() ([]kubevirt.Address, error) { return func() ([]kubevirt.Address, error) { - networkState, err := kubevirt.RetrieveNetworkState(vmi) + networkState, err := kubevirt.RetrieveNetworkState(virtClient, vmi) if err != nil { return nil, err } @@ -793,9 +794,9 @@ var _ = Describe("Kubevirt Virtual Machines", func() { }).WithPolling(time.Second).WithTimeout(time.Minute).Should(Succeed()) } - waitVirtualMachineInstanceReadiness = func(vmi *kubevirtv1.VirtualMachineInstance) { + waitVirtualMachineInstanceReadinessWith = func(vmi *kubevirtv1.VirtualMachineInstance, conditionStatus corev1.ConditionStatus) { GinkgoHelper() - By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vmi.Name)) + By(fmt.Sprintf("Waiting for readiness=%q at virtual machine %s", conditionStatus, vmi.Name)) Eventually(func() []kubevirtv1.VirtualMachineInstanceCondition { err := crClient.Get(context.Background(), crclient.ObjectKeyFromObject(vmi), vmi) Expect(err).To(SatisfyAny( @@ -806,10 +807,20 @@ var _ = Describe("Kubevirt Virtual Machines", func() { }).WithPolling(time.Second).WithTimeout(5 * time.Minute).Should( ContainElement(SatisfyAll( HaveField("Type", kubevirtv1.VirtualMachineInstanceReady), - HaveField("Status", 
corev1.ConditionTrue), + HaveField("Status", conditionStatus), ))) } + waitVirtualMachineInstanceReadiness = func(vmi *kubevirtv1.VirtualMachineInstance) { + GinkgoHelper() + waitVirtualMachineInstanceReadinessWith(vmi, corev1.ConditionTrue) + } + + waitVirtualMachineInstanceFailed = func(vmi *kubevirtv1.VirtualMachineInstance) { + GinkgoHelper() + waitVirtualMachineInstanceReadinessWith(vmi, corev1.ConditionFalse) + } + waitVirtualMachineAddresses = func(vmi *kubevirtv1.VirtualMachineInstance) []kubevirt.Address { GinkgoHelper() step := by(vmi.Name, "Wait for virtual machine to receive IPv4 address from DHCP") @@ -820,14 +831,14 @@ var _ = Describe("Kubevirt Virtual Machines", func() { addresses, err := addressByFamily(ipv4, vmi)() Expect(err).NotTo(HaveOccurred()) if isDualStack() { - output, err := kubevirt.RunCommand(vmi, `echo '{"interfaces":[{"name":"enp1s0","type":"ethernet","state":"up","ipv4":{"enabled":true,"dhcp":true},"ipv6":{"enabled":true,"dhcp":true,"autoconf":false}}],"routes":{"config":[{"destination":"::/0","next-hop-interface":"enp1s0","next-hop-address":"fe80::1"}]}}' |nmstatectl apply`, 5*time.Second) + output, err := virtClient.RunCommand(vmi, `echo '{"interfaces":[{"name":"enp1s0","type":"ethernet","state":"up","ipv4":{"enabled":true,"dhcp":true},"ipv6":{"enabled":true,"dhcp":true,"autoconf":false}}],"routes":{"config":[{"destination":"::/0","next-hop-interface":"enp1s0","next-hop-address":"fe80::1"}]}}' |nmstatectl apply`, 5*time.Second) Expect(err).NotTo(HaveOccurred(), output) step = by(vmi.Name, "Wait for virtual machine to receive IPv6 address from DHCP") Eventually(addressByFamily(ipv6, vmi)). WithPolling(time.Second). WithTimeout(5*time.Minute). 
Should(HaveLen(2), func() string { - output, _ := kubevirt.RunCommand(vmi, "journalctl -u nmstate", 2*time.Second) + output, _ := virtClient.RunCommand(vmi, "journalctl -u nmstate", 2*time.Second) return step + " -> journal nmstate: " + output }) ipv6Addresses, err := addressByFamily(ipv6, vmi)() @@ -902,7 +913,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { NetworkSource: networkSource, }, }, - TerminationGracePeriodSeconds: pointer.Int64(5), + TerminationGracePeriodSeconds: ptr.To(int64(5)), Volumes: []kubevirtv1.Volume{ { Name: "containerdisk", @@ -928,7 +939,7 @@ var _ = Describe("Kubevirt Virtual Machines", func() { GenerateName: vmi.GenerateName, }, Spec: kubevirtv1.VirtualMachineSpec{ - Running: pointer.Bool(true), + RunStrategy: ptr.To(kubevirtv1.RunStrategyAlways), Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: vmi.Annotations, @@ -1065,7 +1076,7 @@ passwd: } err := crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi) Expect(err).NotTo(HaveOccurred()) - Expect(kubevirt.LoginToFedora(vmi, "core", "fedora")).To(Succeed(), step) + Expect(virtClient.LoginToFedora(vmi, "core", "fedora")).To(Succeed(), step) waitVirtualMachineAddresses(vmi) @@ -1073,7 +1084,7 @@ passwd: svc, err := fr.ClientSet.CoreV1().Services(namespace).Create(context.TODO(), composeService("tcpserver", vm.Name, tcpServerPort), metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred(), step) defer func() { - output, err := kubevirt.RunCommand(vmi, "podman logs tcpserver", 10*time.Second) + output, err := virtClient.RunCommand(vmi, "podman logs tcpserver", 10*time.Second) Expect(err).NotTo(HaveOccurred()) fmt.Printf("%s tcpserver logs: %s", vmi.Name, output) }() @@ -1121,11 +1132,15 @@ passwd: g.Expect(pod.Status.PodIP).NotTo(BeEmpty(), "pod %s has no valid IP address yet", pod.Name) } + sanitizeNodeName = func(nodeName string) string { + return strings.ReplaceAll(nodeName, ".", "-") + } + createHTTPServerPods = 
func(annotations map[string]string) []*corev1.Pod { var pods []*corev1.Pod for _, selectedNode := range selectedNodes { pod := composeAgnhostPod( - "testpod-"+selectedNode.Name, + "testpod-"+sanitizeNodeName(selectedNode.Name), namespace, selectedNode.Name, "netexec", "--http-port", "8000") @@ -1195,7 +1210,7 @@ fi IPRequest: staticIPs, } } - pod, err := createPod(fr, "testpod-"+node.Name, node.Name, namespace, []string{"bash", "-c"}, map[string]string{}, func(pod *corev1.Pod) { + pod, err := createPod(fr, "testpod-"+sanitizeNodeName(node.Name), node.Name, namespace, []string{"bash", "-c"}, map[string]string{}, func(pod *corev1.Pod) { if nse != nil { pod.Annotations = networkSelectionElements(*nse) } @@ -1323,6 +1338,9 @@ fi var err error crClient, err = newControllerRuntimeClient() Expect(err).NotTo(HaveOccurred()) + + virtClient, err = kubevirt.NewClient("/tmp") + Expect(err).NotTo(HaveOccurred()) }) Context("with default pod network", Ordered, func() { @@ -1404,7 +1422,7 @@ fi Expect(err).NotTo(HaveOccurred()) d.ConntrackDumpingDaemonSet() - d.OVSFlowsDumpingDaemonSet("breth0") + d.OVSFlowsDumpingDaemonSet(deploymentconfig.Get().ExternalBridgeName()) d.IPTablesDumpingDaemonSet() bandwidthPerMigration := resource.MustParse("40Mi") @@ -1413,8 +1431,8 @@ fi Name: "force-post-copy", }, Spec: kvmigrationsv1alpha1.MigrationPolicySpec{ - AllowPostCopy: pointer.Bool(true), - CompletionTimeoutPerGiB: pointer.Int64(1), + AllowPostCopy: ptr.To(true), + CompletionTimeoutPerGiB: ptr.To(int64(1)), BandwidthPerMigration: &bandwidthPerMigration, Selectors: &kvmigrationsv1alpha1.Selectors{ VirtualMachineInstanceSelector: kvmigrationsv1alpha1.LabelSelector{ @@ -1522,7 +1540,7 @@ fi description: "restart", cmd: func() { By("Restarting vm") - output, err := exec.Command("virtctl", "restart", "-n", namespace, vmi.Name).CombinedOutput() + output, err := virtClient.RestartVirtualMachine(vmi) Expect(err).NotTo(HaveOccurred(), output) By("Wait some time to vmi conditions to catch up 
after restart") @@ -1653,11 +1671,11 @@ write_files: ingress string } var ( - containerNetwork = func(td testData) string { + containerNetwork = func(td testData) (infraapi.Network, error) { if td.ingress == "routed" { - return "bgpnet" + return infraprovider.Get().GetNetwork("bgpnet") } - return "kind" + return infraprovider.Get().PrimaryNetwork() } exposeVMIperfServer = func(td testData, vmi *kubevirtv1.VirtualMachineInstance, vmiAddresses []string) ([]string, int32) { GinkgoHelper() @@ -1698,22 +1716,12 @@ write_files: namespace = fr.Namespace.Name networkName := "" - cidrs := generateL2Subnets(cidrIPv4, cidrIPv6) - cudn, networkName = kubevirt.GenerateCUDN(namespace, "net1", td.topology, td.role, cidrs) + dualCIDRs := filterDualStackCIDRs(fr.ClientSet, []udnv1.CIDR{udnv1.CIDR(cidrIPv4), udnv1.CIDR(cidrIPv6)}) + cudn, networkName = kubevirt.GenerateCUDN(namespace, "net1", td.topology, td.role, dualCIDRs) if td.topology == udnv1.NetworkTopologyLocalnet { By("setting up the localnet underlay") - nodes := ovsPods(clientSet) - Expect(nodes).NotTo(BeEmpty()) - DeferCleanup(func() { - if e2eframework.TestContext.DeleteNamespace && (e2eframework.TestContext.DeleteNamespaceOnFailure || !CurrentSpecReport().Failed()) { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - } - }) - - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, networkName, 0 /*vlanID*/)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(fr, infraapi.Underlay{LogicalNetworkName: networkName})).To(Succeed()) } createCUDN(cudn) @@ -1744,23 +1752,22 @@ write_files: iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, td.role, []string{}) Expect(err).NotTo(HaveOccurred()) - network, err := infraprovider.Get().PrimaryNetwork() - Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") - if containerNetwork := containerNetwork(td); 
containerNetwork != network.Name() { - network, err = infraprovider.Get().GetNetwork(containerNetwork) - Expect(err).ShouldNot(HaveOccurred(), "must to get alternative network") - } - externalContainerPort := infraprovider.Get().GetExternalContainerPort() - externalContainerName := namespace + "-iperf" - externalContainerSpec := infraapi.ExternalContainer{ - Name: externalContainerName, - Image: images.IPerf3(), - Network: network, - Args: []string{"sleep infinity"}, - ExtPort: externalContainerPort, + var externalContainer infraapi.ExternalContainer + if td.role == udnv1.NetworkRolePrimary { + providerNetwork, err := containerNetwork(td) + Expect(err).ShouldNot(HaveOccurred(), "primary network must be available to attach containers") + externalContainerPort := infraprovider.Get().GetExternalContainerPort() + externalContainerName := namespace + "-iperf" + externalContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.IPerf3(), + Network: providerNetwork, + CmdArgs: []string{"sleep infinity"}, + ExtPort: externalContainerPort, + } + externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) + Expect(err).ShouldNot(HaveOccurred(), "creation of external container is test dependency") } - externalContainer, err := providerCtx.CreateExternalContainer(externalContainerSpec) - Expect(err).ShouldNot(HaveOccurred(), "creation of external container is test dependency") var externalContainerIPs []string if externalContainer.IsIPv4() { @@ -1773,8 +1780,8 @@ write_files: if td.ingress == "routed" { // pre=created test dependency and therefore we dont delete frrExternalContainer := infraapi.ExternalContainer{Name: "frr"} - frrNetwork, err := infraprovider.Get().GetNetwork(containerNetwork(td)) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to fetch network %q: %v", containerNetwork(td), err)) + frrNetwork, err := containerNetwork(td) + Expect(err).NotTo(HaveOccurred()) frrExternalContainerInterface, err := 
infraprovider.Get().GetExternalContainerNetworkInterface(frrExternalContainer, frrNetwork) Expect(err).NotTo(HaveOccurred(), "must fetch FRR container network interface attached to secondary network") @@ -1800,7 +1807,7 @@ ip route add %[3]s via %[4]s step := by(vmi.Name, "Login to virtual machine for the first time") Eventually(func() error { - return kubevirt.LoginToFedora(vmi, "fedora", "fedora") + return virtClient.LoginToFedora(vmi, "fedora", "fedora") }). WithTimeout(5*time.Second). WithPolling(time.Second). @@ -1808,7 +1815,7 @@ ip route add %[3]s via %[4]s // expect 2 addresses on dual-stack deployments; 1 on single-stack step = by(vmi.Name, "Wait for addresses at the virtual machine") - expectedNumberOfAddresses := len(cidrs) + expectedNumberOfAddresses := len(dualCIDRs) expectedAddreses := virtualMachineAddressesFromStatus(vmi, expectedNumberOfAddresses) expectedAddresesAtGuest := expectedAddreses testPodsIPs := podsMultusNetworkIPs(iperfServerTestPods, podNetworkStatusByNetConfigPredicate(namespace, cudn.Name, strings.ToLower(string(td.role)))) @@ -1825,7 +1832,7 @@ ip route add %[3]s via %[4]s Expect(testPodsIPs).NotTo(BeEmpty()) Eventually(kubevirt.RetrieveAllGlobalAddressesFromGuest). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(ConsistOf(expectedAddresesAtGuest), step) @@ -1835,7 +1842,7 @@ ip route add %[3]s via %[4]s checkEastWestIperfTraffic(vmi, testPodsIPs, step) if td.role == udnv1.NetworkRolePrimary { - if isIPv6Supported() && isInterconnectEnabled() { + if isIPv6Supported(fr.ClientSet) && isInterconnectEnabled() { step = by(vmi.Name, fmt.Sprintf("Checking IPv6 gateway before %s %s", td.resource.description, td.test.description)) nodeRunningVMI, err := fr.ClientSet.CoreV1().Nodes().Get(context.Background(), vmi.Status.NodeName, metav1.GetOptions{}) @@ -1844,16 +1851,16 @@ ip route add %[3]s via %[4]s expectedIPv6GatewayPath, err := kubevirt.GenerateGatewayIPv6RouterLLA(nodeRunningVMI, networkName) Expect(err).NotTo(HaveOccurred()) Eventually(kubevirt.RetrieveIPv6Gateways). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). Should(Equal([]string{expectedIPv6GatewayPath}), "should filter remote ipv6 gateway nexthop") } step = by(vmi.Name, fmt.Sprintf("Check north/south traffic before %s %s", td.resource.description, td.test.description)) - output, err := kubevirt.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) + output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) - Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) - checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) + Expect(startNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step)).To(Succeed()) + checkNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step) checkNorthSouthEgressICMPTraffic(vmi, externalContainerIPs, step) if td.ingress == "routed" { _, err := infraprovider.Get().ExecExternalContainerCommand(externalContainer, []string{"bash", "-c", iperfServerScript}) @@ -1873,13 +1880,13 @@ ip route add %[3]s via %[4]s td.test.cmd() step = 
by(vmi.Name, fmt.Sprintf("Login to virtual machine after %s %s", td.resource.description, td.test.description)) - Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) obtainedAddresses := virtualMachineAddressesFromStatus(vmi, expectedNumberOfAddresses) Expect(obtainedAddresses).To(Equal(expectedAddreses)) Eventually(kubevirt.RetrieveAllGlobalAddressesFromGuest). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). Should(ConsistOf(expectedAddresesAtGuest), step) @@ -1889,15 +1896,15 @@ ip route add %[3]s via %[4]s // At restart we need re-connect Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) if td.role == udnv1.NetworkRolePrimary { - output, err := kubevirt.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) + output, err := virtClient.RunCommand(vmi, "/tmp/iperf-server.sh &", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) - Expect(startNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step)).To(Succeed()) + Expect(startNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step)).To(Succeed()) } } checkEastWestIperfTraffic(vmi, testPodsIPs, step) if td.role == udnv1.NetworkRolePrimary { step = by(vmi.Name, fmt.Sprintf("Check north/south traffic after %s %s", td.resource.description, td.test.description)) - checkNorthSouthIngressIperfTraffic(externalContainerName, serverIPs, serverPort, step) + checkNorthSouthIngressIperfTraffic(externalContainer, serverIPs, serverPort, step) checkNorthSouthEgressICMPTraffic(vmi, externalContainerIPs, step) if td.ingress == "routed" { checkNorthSouthEgressIperfTraffic(vmi, externalContainerIPs, iperf3DefaultPort, step) @@ -1905,7 +1912,7 @@ ip route add %[3]s via %[4]s } if td.role == udnv1.NetworkRolePrimary && td.test.description == liveMigrate.description && 
isInterconnectEnabled() { - if isIPv4Supported() { + if isIPv4Supported(fr.ClientSet) { step = by(vmi.Name, fmt.Sprintf("Checking IPv4 gateway cached mac after %s %s", td.resource.description, td.test.description)) Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) @@ -1917,12 +1924,12 @@ ip route add %[3]s via %[4]s Expect(err).NotTo(HaveOccurred(), step) Eventually(kubevirt.RetrieveCachedGatewayMAC). - WithArguments(vmi, "enp1s0", cidrIPv4). + WithArguments(virtClient, vmi, "enp1s0", cidrIPv4). WithTimeout(10*time.Second). WithPolling(time.Second). Should(Equal(expectedGatewayMAC), step) } - if isIPv6Supported() { + if isIPv6Supported(fr.ClientSet) { step = by(vmi.Name, fmt.Sprintf("Checking IPv6 gateway after %s %s", td.resource.description, td.test.description)) targetNode, err := fr.ClientSet.CoreV1().Nodes().Get(context.Background(), vmi.Status.MigrationState.TargetNode, metav1.GetOptions{}) @@ -1931,7 +1938,7 @@ ip route add %[3]s via %[4]s targetNodeIPv6GatewayPath, err := kubevirt.GenerateGatewayIPv6RouterLLA(targetNode, networkName) Expect(err).NotTo(HaveOccurred()) Eventually(kubevirt.RetrieveIPv6Gateways). - WithArguments(vmi). + WithArguments(virtClient, vmi). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(Equal([]string{targetNodeIPv6GatewayPath}), "should reconcile ipv6 gateway nexthop after live migration") @@ -1947,7 +1954,7 @@ ip route add %[3]s via %[4]s if td.ingress != "" { ingress = td.ingress } - return fmt.Sprintf("after %s of %s with %s/%s with %q ingress", td.test.description, td.resource.description, role, td.topology, ingress) + return fmt.Sprintf("after %s of %s with %s/%s with %s ingress", td.test.description, td.resource.description, role, td.topology, ingress) }, Entry(nil, testData{ resource: virtualMachine, @@ -2054,8 +2061,8 @@ ip route add %[3]s via %[4]s }) fr.Namespace = ns namespace = fr.Namespace.Name - cidrs := generateL2Subnets(cidrIPv4, cidrIPv6) - cudn, _ := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLayer2, udnv1.NetworkRolePrimary, cidrs) + dualCIDRs := filterDualStackCIDRs(fr.ClientSet, []udnv1.CIDR{udnv1.CIDR(cidrIPv4), udnv1.CIDR(cidrIPv6)}) + cudn, _ := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLayer2, udnv1.NetworkRolePrimary, dualCIDRs) cudn.Spec.Network.Layer2.MTU = 1300 createCUDN(cudn) @@ -2096,7 +2103,7 @@ ip route add %[3]s via %[4]s Get(context.Background(), config.Kubernetes.DNSServiceName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) - if isIPv4Supported() { + if isIPv4Supported(fr.ClientSet) { expectedIP, err := matchIPv4StringFamily(primaryUDNNetworkStatus.IPs) Expect(err).NotTo(HaveOccurred()) @@ -2124,7 +2131,7 @@ ip route add %[3]s via %[4]s Expect(primaryUDNValueForDevice("GENERAL.MTU")).To(ConsistOf("1300")) } - if isIPv6Supported() { + if isIPv6Supported(fr.ClientSet) { expectedIP, err := matchIPv6StringFamily(primaryUDNNetworkStatus.IPs) Expect(err).NotTo(HaveOccurred()) Eventually(primaryUDNValueFor). 
@@ -2163,7 +2170,7 @@ ip route add %[3]s via %[4]s vmiIPv4 = "10.128.0.100/24" vmiIPv6 = "2010:100:200::100/60" vmiMAC = "0A:58:0A:80:00:64" - cidr = selectCIDRs(ipv4CIDR, ipv6CIDR) + cidrs = []string{ipv4CIDR, ipv6CIDR} staticIPsNetworkData = func(ips []string) (string, error) { type Ethernet struct { Addresses []string `json:"addresses,omitempty"` @@ -2192,54 +2199,49 @@ chpasswd: { expire: False } ) DescribeTable("should maintain tcp connection with minimal downtime", func(td func(vmi *kubevirtv1.VirtualMachineInstance)) { By("setting up the localnet underlay") - nodes := ovsPods(clientSet) - Expect(nodes).NotTo(BeEmpty()) - DeferCleanup(func() { - if e2eframework.TestContext.DeleteNamespace && (e2eframework.TestContext.DeleteNamespaceOnFailure || !CurrentSpecReport().Failed()) { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - } - }) - cudn, networkName := kubevirt.GenerateCUDN(namespace, "net1", udnv1.NetworkTopologyLocalnet, udnv1.NetworkRoleSecondary, udnv1.DualStackCIDRs{}) createCUDN(cudn) - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, networkName, 0 /*vlanID*/)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(fr, infraapi.Underlay{LogicalNetworkName: networkName})).To(Succeed()) workerNodeList, err := fr.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: labels.FormatLabels(map[string]string{"node-role.kubernetes.io/worker": ""})}) Expect(err).NotTo(HaveOccurred()) selectedNodes = workerNodeList.Items Expect(selectedNodes).NotTo(BeEmpty()) - iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, cudn.Spec.Network.Localnet.Role, cidr) + iperfServerTestPods, err = createIperfServerPods(selectedNodes, cudn.Name, cudn.Spec.Network.Localnet.Role, filterCIDRs(fr.ClientSet, cidrs...)) Expect(err).NotTo(HaveOccurred()) - networkData, err := staticIPsNetworkData(selectCIDRs(vmiIPv4, 
vmiIPv6)) + networkData, err := staticIPsNetworkData(filterCIDRs(fr.ClientSet, vmiIPv4, vmiIPv6)) Expect(err).NotTo(HaveOccurred()) - vmi := fedoraWithTestToolingVMI(nil /*labels*/, nil /*annotations*/, nil /*nodeSelector*/, kubevirtv1.NetworkSource{ + vm := fedoraWithTestToolingVM(nil /*labels*/, nil /*annotations*/, nil /*nodeSelector*/, kubevirtv1.NetworkSource{ Multus: &kubevirtv1.MultusNetwork{ NetworkName: cudn.Name, }, }, userData, networkData) // Harcode mac address so it's the same after live migration - vmi.Spec.Domain.Devices.Interfaces[0].MacAddress = vmiMAC - createVirtualMachineInstance(vmi) - + vm.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress = vmiMAC + createVirtualMachine(vm) + vmi := &kubevirtv1.VirtualMachineInstance{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: vm.Name, + }, + } waitVirtualMachineInstanceReadiness(vmi) Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) step := by(vmi.Name, "Login to virtual machine for the first time") Eventually(func() error { - return kubevirt.LoginToFedora(vmi, "fedora", "fedora") + return virtClient.LoginToFedora(vmi, "fedora", "fedora") }). WithTimeout(5*time.Second). WithPolling(time.Second). 
Should(Succeed(), step) step = by(vmi.Name, "Wait for cloud init to finish at first boot") - output, err := kubevirt.RunCommand(vmi, "cloud-init status --wait", time.Minute) + output, err := virtClient.RunCommand(vmi, "cloud-init status --wait", time.Minute) Expect(err).NotTo(HaveOccurred(), step+": "+output) testPodsIPs := podsMultusNetworkIPs(iperfServerTestPods, podNetworkStatusByNetConfigPredicate(namespace, cudn.Name, strings.ToLower(string(cudn.Spec.Network.Localnet.Role)))) @@ -2252,11 +2254,39 @@ chpasswd: { expire: False } by(vmi.Name, "Running live migration for virtual machine instance") td(vmi) - step = by(vmi.Name, fmt.Sprintf("Login to virtual machine after virtual machine instance live migration")) - Expect(kubevirt.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + // Update vmi status after live migration + Expect(crClient.Get(context.Background(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + step = by(vmi.Name, "Login to virtual machine after virtual machine instance live migration") + Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) step = by(vmi.Name, "Check east/west traffic after virtual machine instance live migration") checkEastWestIperfTraffic(vmi, testPodsIPs, step) + + By("Stop iperf3 traffic before force killing vm, so iperf3 server do not get stuck") + output, err = virtClient.RunCommand(vmi, "killall iperf3", 5*time.Second) + Expect(err).ToNot(HaveOccurred(), output) + + step = by(vmi.Name, fmt.Sprintf("Force kill qemu at node %q where VM is running on", vmi.Status.NodeName)) + Expect(kubevirt.ForceKillVirtLauncherAtNode(infraprovider.Get(), vmi.Status.NodeName, vmi.Namespace, vmi.Name)).To(Succeed()) + + step = by(vmi.Name, "Waiting for failed restarted VMI to reach ready state") + waitVirtualMachineInstanceFailed(vmi) + waitVirtualMachineInstanceReadiness(vmi) + Expect(crClient.Get(context.TODO(), crclient.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + step = by(vmi.Name, 
"Login to virtual machine after virtual machine instance force killed") + Expect(virtClient.LoginToFedora(vmi, "fedora", "fedora")).To(Succeed(), step) + + step = by(vmi.Name, "Restart iperf traffic after forcing a vm failure") + Expect(startEastWestIperfTraffic(vmi, testPodsIPs, step)).To(Succeed(), step) + checkEastWestIperfTraffic(vmi, testPodsIPs, step) + + by(vmi.Name, "Running live migration after forcing vm failure") + td(vmi) + + step = by(vmi.Name, "Check east/west traffic for failed virtual machine after live migration") + checkEastWestIperfTraffic(vmi, testPodsIPs, step) }, Entry("after succeeded live migration", liveMigrateSucceed), Entry("after failed live migration", liveMigrateFailed), diff --git a/test/e2e/kubevirt/client.go b/test/e2e/kubevirt/client.go new file mode 100644 index 0000000000..60c2cbcc2f --- /dev/null +++ b/test/e2e/kubevirt/client.go @@ -0,0 +1,128 @@ +package kubevirt + +import ( + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + + retry "k8s.io/client-go/util/retry" + + kubevirtv1 "kubevirt.io/api/core/v1" +) + +type Client struct { + path string +} + +func NewClient(cliDir string) (*Client, error) { + // Ensure the virtctl directory exists. + if err := os.MkdirAll(cliDir, 0755); err != nil { + return nil, fmt.Errorf("failed to create virtctl directory %q: %w", cliDir, err) + } + + // Ensure the virtctl executable is present. 
+ if err := ensureVirtctl(cliDir); err != nil { + return nil, fmt.Errorf("failed to ensure virtctl: %w", err) + } + + return &Client{path: filepath.Join(cliDir, "virtctl")}, nil +} + +func (virtctl *Client) RestartVirtualMachine(vmi *kubevirtv1.VirtualMachineInstance) (string, error) { + output, err := exec.Command(virtctl.path, "restart", "-n", vmi.Namespace, vmi.Name).CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to restart VM: %w", err) + } + return string(output), nil +} + +func ensureVirtctl(cliDir string) error { + // Check if the "virtctl" executable exists in the specified path. + // If it does not exist, call the installVirtctl function. + if _, err := os.Stat(filepath.Join(cliDir, "virtctl")); os.IsNotExist(err) { + return installVirtctl(cliDir) + } else if err != nil { + return fmt.Errorf("error checking virtctl executable: %w", err) + } + return nil +} + +func downloadVirtctlBinary() (io.ReadCloser, error) { + // Fetch the latest stable version of KubeVirt from the stable.txt file. + stableResp, err := http.Get("https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt") + if err != nil { + return nil, fmt.Errorf("failed to fetch stable version: %w", err) + } + defer stableResp.Body.Close() + + // Check if the HTTP response status is OK. + if stableResp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to fetch stable version: received status code %d", stableResp.StatusCode) + } + + // Read the version from the response body. + versionBytes, err := io.ReadAll(stableResp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read stable version: %w", err) + } + version := strings.TrimSpace(string(versionBytes)) + + // Construct the download URL for the virtctl binary. + virtctlURL := fmt.Sprintf("https://github.com/kubevirt/kubevirt/releases/download/%s/virtctl-%s-linux-amd64", version, version) + + // Download the virtctl binary. 
+ virtctlResp, err := http.Get(virtctlURL) + if err != nil { + return nil, fmt.Errorf("failed to download virtctl: %w", err) + } + + // Check if the HTTP response status is OK. + if virtctlResp.StatusCode != http.StatusOK { + // Close the body on error to prevent resource leaks + virtctlResp.Body.Close() + return nil, fmt.Errorf("failed to download virtctl: received status code %d", virtctlResp.StatusCode) + } + + return virtctlResp.Body, nil +} + +func installVirtctl(cliDir string) error { + var virtctlBody io.ReadCloser + allErrors := func(err error) bool { + return true + } + err := retry.OnError(retry.DefaultRetry, allErrors, func() error { + var downloadErr error + virtctlBody, downloadErr = downloadVirtctlBinary() + return downloadErr // Return the error if download failed, nil otherwise. + }) + if err != nil { + // If err is not nil here, it means all retries failed. + return err + } + defer virtctlBody.Close() // Ensure the body is closed + + // Save the binary to the specified directory. + cliPath := filepath.Join(cliDir, "virtctl") + outFile, err := os.Create(cliPath) + if err != nil { + return fmt.Errorf("failed to create virtctl file at %s: %w", cliPath, err) + } + defer outFile.Close() + + _, err = io.Copy(outFile, virtctlBody) + if err != nil { + return fmt.Errorf("failed to save virtctl binary to %s: %w", cliPath, err) + } + + // Make the binary executable. + if err := os.Chmod(cliPath, 0755); err != nil { + return fmt.Errorf("failed to make virtctl executable at %s: %w", cliPath, err) + } + + return nil +} diff --git a/test/e2e/kubevirt/console.go b/test/e2e/kubevirt/console.go index 822bd04162..4ca7533be8 100644 --- a/test/e2e/kubevirt/console.go +++ b/test/e2e/kubevirt/console.go @@ -54,21 +54,12 @@ var ( shellFailRegexp = regexp.MustCompile(shellFail) ) -// SafeExpectBatch runs the batch from `expected`, connecting to a VMI's console and -// waiting `wait` seconds for the batch to return. -// It validates that the commands arrive to the console. 
-// NOTE: This functions heritage limitations from `expectBatchWithValidatedSend` refer to it to check them. -func safeExpectBatch(vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) error { - _, err := safeExpectBatchWithResponse(vmi, expected, timeout) - return err -} - // safeExpectBatchWithResponse runs the batch from `expected`, connecting to a VMI's console and // waiting `wait` seconds for the batch to return with a response. // It validates that the commands arrive to the console. // NOTE: This functions inherits limitations from `expectBatchWithValidatedSend`, refer to it for more information. -func safeExpectBatchWithResponse(vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) ([]expect.BatchRes, error) { - expecter, _, err := newExpecter(vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) +func safeExpectBatchWithResponse(virtctlPath string, vmi *v1.VirtualMachineInstance, expected []expect.Batcher, timeout time.Duration) ([]expect.BatchRes, error) { + expecter, _, err := newExpecter(virtctlPath, vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) if err != nil { return nil, err } @@ -81,8 +72,12 @@ func safeExpectBatchWithResponse(vmi *v1.VirtualMachineInstance, expected []expe return resp, err } -func RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { - results, err := safeExpectBatchWithResponse(vmi, []expect.Batcher{ +func (virtctl *Client) RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { + return runCommand(virtctl.path, vmi, command, timeout) +} + +func runCommand(virtctlPath string, vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) { + results, err := safeExpectBatchWithResponse(virtctlPath, vmi, []expect.Batcher{ &expect.BSnd{S: "\n"}, &expect.BExp{R: PromptExpression}, 
&expect.BSnd{S: command + "\n"}, @@ -114,10 +109,11 @@ func skipInput(scanner *bufio.Scanner) bool { // newExpecter will connect to an already logged in VMI console and return the generated expecter it will wait `timeout` for the connection. func newExpecter( + virtctlPath string, vmi *v1.VirtualMachineInstance, timeout time.Duration, opts ...expect.Option) (expect.Expecter, <-chan error, error) { - virtctlCmd := []string{"virtctl", "console", "-n", vmi.Namespace, vmi.Name} + virtctlCmd := []string{virtctlPath, "console", "-n", vmi.Namespace, vmi.Name} return expect.SpawnWithArgs(virtctlCmd, timeout, expect.SendTimeout(timeout), expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) } @@ -182,13 +178,13 @@ func expectBatchWithValidatedSend(expecter expect.Expecter, batch []expect.Batch return res, err } -func LoginToFedora(vmi *kubevirtv1.VirtualMachineInstance, user, password string) error { - return LoginToFedoraWithHostname(vmi, user, password, vmi.Name) +func (virtctl *Client) LoginToFedora(vmi *kubevirtv1.VirtualMachineInstance, user, password string) error { + return loginToFedoraWithHostname(virtctl.path, vmi, user, password, vmi.Name) } // LoginToFedora performs a console login to a Fedora base VM -func LoginToFedoraWithHostname(vmi *kubevirtv1.VirtualMachineInstance, user, password, hostname string) error { - expecter, _, err := newExpecter(vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) +func loginToFedoraWithHostname(virtctlPath string, vmi *kubevirtv1.VirtualMachineInstance, user, password, hostname string) error { + expecter, _, err := newExpecter(virtctlPath, vmi, consoleConnectionTimeout, expect.Verbose(true), expect.VerboseWriter(GinkgoWriter)) if err != nil { return err } diff --git a/test/e2e/kubevirt/ip.go b/test/e2e/kubevirt/ip.go index 180c7d252a..3e11bd9b92 100644 --- a/test/e2e/kubevirt/ip.go +++ b/test/e2e/kubevirt/ip.go @@ -8,7 +8,7 @@ import ( v1 "kubevirt.io/api/core/v1" ) -func 
RetrieveAllGlobalAddressesFromGuest(vmi *v1.VirtualMachineInstance) ([]string, error) { +func RetrieveAllGlobalAddressesFromGuest(cli *Client, vmi *v1.VirtualMachineInstance) ([]string, error) { ifaces := []struct { Name string `json:"ifname"` Addresses []struct { @@ -19,7 +19,7 @@ func RetrieveAllGlobalAddressesFromGuest(vmi *v1.VirtualMachineInstance) ([]stri } `json:"addr_info"` }{} - output, err := RunCommand(vmi, "ip -j a show", 2*time.Second) + output, err := cli.RunCommand(vmi, "ip -j a show", 2*time.Second) if err != nil { return nil, fmt.Errorf("failed retrieving adresses with ip command: %s: %w", output, err) } diff --git a/test/e2e/kubevirt/net.go b/test/e2e/kubevirt/net.go index 8c65118ae1..03b7e819ff 100644 --- a/test/e2e/kubevirt/net.go +++ b/test/e2e/kubevirt/net.go @@ -27,7 +27,7 @@ nmcli c mod %[1]s ipv4.addresses "" ipv6.addresses "" ipv4.gateway "" ipv6.gatew nmcli d reapply %[1]s`, iface) } -func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr string) (string, error) { +func RetrieveCachedGatewayMAC(cli *Client, vmi *kubevirtv1.VirtualMachineInstance, dev, cidr string) (string, error) { _, ipNet, err := net.ParseCIDR(cidr) if err != nil { return "", err @@ -35,7 +35,7 @@ func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr gatewayIP := util.GetNodeGatewayIfAddr(ipNet).IP.String() - output, err := RunCommand(vmi, fmt.Sprintf("ip neigh get %s dev %s", gatewayIP, dev), 2*time.Second) + output, err := cli.RunCommand(vmi, fmt.Sprintf("ip neigh get %s dev %s", gatewayIP, dev), 2*time.Second) if err != nil { return "", fmt.Errorf("%s: %v", output, err) } @@ -46,12 +46,12 @@ func RetrieveCachedGatewayMAC(vmi *kubevirtv1.VirtualMachineInstance, dev, cidr return outputSplit[4], nil } -func RetrieveIPv6Gateways(vmi *v1.VirtualMachineInstance) ([]string, error) { +func RetrieveIPv6Gateways(cli *Client, vmi *v1.VirtualMachineInstance) ([]string, error) { routes := []struct { Gateway string 
`json:"gateway"` }{} - output, err := RunCommand(vmi, "ip -6 -j route list default", 2*time.Second) + output, err := cli.RunCommand(vmi, "ip -6 -j route list default", 2*time.Second) if err != nil { return nil, fmt.Errorf("%s: %v", output, err) } diff --git a/test/e2e/kubevirt/nmstate.go b/test/e2e/kubevirt/nmstate.go index 10e8e34108..bd852ca794 100644 --- a/test/e2e/kubevirt/nmstate.go +++ b/test/e2e/kubevirt/nmstate.go @@ -27,8 +27,8 @@ type NetworkState struct { Interfaces []Interface `json:"interfaces"` } -func RetrieveNetworkState(vmi *v1.VirtualMachineInstance) (*NetworkState, error) { - output, err := RunCommand(vmi, "nmstatectl show --json", 2*time.Second) +func RetrieveNetworkState(cli *Client, vmi *v1.VirtualMachineInstance) (*NetworkState, error) { + output, err := cli.RunCommand(vmi, "nmstatectl show --json", 2*time.Second) if err != nil { return nil, fmt.Errorf("%s: %v", output, err) } diff --git a/test/e2e/kubevirt/pod.go b/test/e2e/kubevirt/pod.go index 5ad2011a1f..1293e1acc5 100644 --- a/test/e2e/kubevirt/pod.go +++ b/test/e2e/kubevirt/pod.go @@ -1,6 +1,9 @@ package kubevirt import ( + "fmt" + + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" @@ -31,3 +34,17 @@ func GenerateFakeVirtLauncherPod(namespace, vmName string) *corev1.Pod { }, } } + +func ForceKillVirtLauncherAtNode(p infraapi.Provider, nodeName, vmNamespace, vmName string) error { + // /usr/bin/virt-launcher --qemu-timeout 312s --name worker-dcf9j --uid bcf975f4-7bdd-4264-948b-b6080320e38a --namespace kv-live-migration-2575 --kubevirt-share-dir /var/run/kubevirt --ephemeral-disk-dir /var/run/kubevirt-ephemeral-disks --container-disk-dir /var/run/kubevirt/container-disks --grace-period-seconds 20 --hook-sidecars 0 --ovmf-path /usr/share/OVMF --run-as-nonroot + killScript := fmt.Sprintf(` +pid=$(pgrep -f 'virt-launcher .*--name %s.*--namespace %s'|grep -v $$) +ps aux |grep 
virt-launcher +kill -9 $pid +`, vmName, vmNamespace) + output, err := p.ExecK8NodeCommand(nodeName, []string{"bash", "-xe", "-c", killScript}) + if err != nil { + return fmt.Errorf("%s:%w", output, err) + } + return nil +} diff --git a/test/e2e/label/component.go b/test/e2e/label/component.go new file mode 100644 index 0000000000..59e61165c5 --- /dev/null +++ b/test/e2e/label/component.go @@ -0,0 +1,7 @@ +package label + +import "github.com/onsi/ginkgo/v2" + +func ComponentName() ginkgo.Labels { + return NewComponent("ovn-kubernetes") +} diff --git a/test/e2e/label/label.go b/test/e2e/label/label.go new file mode 100644 index 0000000000..61448bf930 --- /dev/null +++ b/test/e2e/label/label.go @@ -0,0 +1,50 @@ +package label + +import "github.com/onsi/ginkgo/v2" + +// Label is a wrapper for ginkgo label. We need a wrapper because we want to constrain inputs. If Key and Value are not +// empty, then it will be concatenated together seperated by ':'. If Key is not empty and Value is empty, then only the Key is used. +type Label struct { + // Key is mandatory + Key string + // Value is optional + Value string +} + +func (l Label) GinkgoLabel() ginkgo.Labels { + if l.Value == "" { + return ginkgo.Label(l.Key) + } + return ginkgo.Label(l.Key + ":" + l.Value) +} + +func NewComponent(name string) ginkgo.Labels { + return New(name, "").GinkgoLabel() +} + +func New(parts ...string) Label { + if len(parts) == 0 || len(parts) > 2 { + panic("invalid number of label constituents") + } + key, val := processOverrides(parts[0]), processOverrides(parts[1]) + return Label{ + Key: key, + Value: val, + } +} + +func processOverrides(s string) string { + overRide, ok := overrideMap[s] + if !ok { + return s + } + return overRide +} + +// Extended returns a label used to label extended feature tests. 
This label +// might be used to label feature tests that are considered not to be testing +// the core functionality of a feature and that might be filtered out for +// various reasons like for example to keep selected job run times down. +func Extended() ginkgo.Labels { + return ginkgo.Label("EXTENDED") +} diff --git a/test/e2e/label/override.go b/test/e2e/label/override.go new file mode 100644 index 0000000000..31aa0fa0cd --- /dev/null +++ b/test/e2e/label/override.go @@ -0,0 +1,5 @@ +package label + +// overrideMap is used to rewrite label key and/or values. For example, if you want to rewrite Feature to a downstream specific name, +// therefore youd add "Feature" as a key to the overrides map and value to be what you wish to rewrite it to. +var overrideMap = map[string]string{} diff --git a/test/e2e/localnet-underlay.go b/test/e2e/localnet-underlay.go index 03649143dd..df8caf702f 100644 --- a/test/e2e/localnet-underlay.go +++ b/test/e2e/localnet-underlay.go @@ -1,237 +1 @@ package e2e - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "time" - - "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" -) - -const ( - defaultOvsBridge = "breth0" - secondaryBridge = "ovsbr1" - add = "add-br" - del = "del-br" -) - -func setupUnderlay(ovsPods []v1.Pod, bridgeName, portName, networkName string, vlanID int) error { - for _, ovsPod := range ovsPods { - if bridgeName != defaultOvsBridge { - if err := addOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { - return err - } - - if vlanID > 0 { - if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, bridgeName, portName, vlanID); err != nil { - return err - } - } else { - if err := ovsAttachPortToBridge(ovsPod.Namespace, ovsPod.Name, bridgeName, portName); err != nil { - return err - } - } - } - if 
err := configureBridgeMappings( - ovsPod.Namespace, - ovsPod.Name, - defaultNetworkBridgeMapping(), - bridgeMapping(networkName, bridgeName), - ); err != nil { - return err - } - } - return nil -} - -func ovsRemoveSwitchPort(ovsPods []v1.Pod, portName string, newVLANID int) error { - for _, ovsPod := range ovsPods { - if err := ovsRemoveVLANAccessPort(ovsPod.Namespace, ovsPod.Name, secondaryBridge, portName); err != nil { - return fmt.Errorf("failed to remove old VLAN port: %v", err) - } - - if err := ovsEnableVLANAccessPort(ovsPod.Namespace, ovsPod.Name, secondaryBridge, portName, newVLANID); err != nil { - return fmt.Errorf("failed to add new VLAN port: %v", err) - } - } - - return nil -} - -func teardownUnderlay(ovsPods []v1.Pod, bridgeName string) error { - for _, ovsPod := range ovsPods { - if bridgeName != defaultOvsBridge { - if err := removeOVSBridge(ovsPod.Namespace, ovsPod.Name, bridgeName); err != nil { - return err - } - } - // restore default bridge mapping - if err := configureBridgeMappings( - ovsPod.Namespace, - ovsPod.Name, - defaultNetworkBridgeMapping(), - ); err != nil { - return err - } - } - return nil -} - -func ovsPods(clientSet clientset.Interface) []v1.Pod { - const ( - ovsNodeLabel = "app=ovs-node" - ) - pods, err := clientSet.CoreV1().Pods(deploymentconfig.Get().OVNKubernetesNamespace()).List( - context.Background(), - metav1.ListOptions{LabelSelector: ovsNodeLabel}, - ) - if err != nil { - return nil - } - return pods.Items -} - -func addOVSBridge(podNamespace, podName string, bridgeName string) error { - cmd := strings.Join([]string{"ovs-vsctl", add, bridgeName}, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) - } - return nil -} - -func removeOVSBridge(podNamespace, podName string, bridgeName string) error { - cmd := strings.Join([]string{"ovs-vsctl", del, bridgeName}, " ") - if _, err := 
e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to add ovs bridge %q: %v", bridgeName, err) - } - return nil -} - -func ovsAttachPortToBridge(podNamespace, podName string, bridgeName string, portName string) error { - cmd := strings.Join([]string{ - "ovs-vsctl", "add-port", bridgeName, portName, - }, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -func ovsEnableVLANAccessPort(podNamespace, podName string, bridgeName string, portName string, vlanID int) error { - cmd := strings.Join([]string{ - "ovs-vsctl", "add-port", bridgeName, portName, fmt.Sprintf("tag=%d", vlanID), "vlan_mode=access", - }, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -func ovsRemoveVLANAccessPort(podNamespace, podName string, bridgeName string, portName string) error { - cmd := strings.Join([]string{ - "ovs-vsctl", "del-port", bridgeName, portName, - }, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to remove port %s from OVS bridge %s: %v", portName, bridgeName, err) - } - return nil -} - -type BridgeMapping struct { - physnet string - ovsBridge string -} - -func (bm BridgeMapping) String() string { - return fmt.Sprintf("%s:%s", bm.physnet, bm.ovsBridge) -} - -type BridgeMappings []BridgeMapping - -func (bms BridgeMappings) String() string { - return strings.Join(Map(bms, func(bm BridgeMapping) string { return bm.String() }), ",") -} - -func Map[T, V any](items []T, fn func(T) V) []V { - result := make([]V, 
len(items)) - for i, t := range items { - result[i] = fn(t) - } - return result -} - -func configureBridgeMappings(podNamespace, podName string, mappings ...BridgeMapping) error { - mappingsString := fmt.Sprintf("external_ids:ovn-bridge-mappings=%s", BridgeMappings(mappings).String()) - cmd := strings.Join([]string{"ovs-vsctl", "set", "open", ".", mappingsString}, " ") - if _, err := e2epodoutput.RunHostCmdWithRetries(podNamespace, podName, cmd, time.Second, time.Second*5); err != nil { - return fmt.Errorf("failed to configure bridge mappings %q: %v", mappingsString, err) - } - return nil -} - -func defaultNetworkBridgeMapping() BridgeMapping { - return BridgeMapping{ - physnet: "physnet", - ovsBridge: "breth0", - } -} - -func bridgeMapping(physnet, ovsBridge string) BridgeMapping { - return BridgeMapping{ - physnet: physnet, - ovsBridge: ovsBridge, - } -} - -// TODO: make this function idempotent; use golang netlink instead -func createVLANInterface(deviceName string, vlanID string, ipAddress *string) error { - vlan := vlanName(deviceName, vlanID) - cmd := exec.Command("sudo", "ip", "link", "add", "link", deviceName, "name", vlan, "type", "vlan", "id", vlanID) - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to create vlan interface %s: %v", vlan, err) - } - - cmd = exec.Command("sudo", "ip", "link", "set", "dev", vlan, "up") - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to enable vlan interface %s: %v", vlan, err) - } - - if ipAddress != nil { - cmd = exec.Command("sudo", "ip", "addr", "add", *ipAddress, "dev", vlan) - cmd.Stderr = os.Stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to define the vlan interface %q IP Address %s: %v", vlan, *ipAddress, err) - } - } - return nil -} - -// TODO: make this function idempotent; use golang netlink instead -func deleteVLANInterface(deviceName string, vlanID string) error { - vlan := vlanName(deviceName, vlanID) - 
cmd := exec.Command("sudo", "ip", "link", "del", vlan) - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to delete vlan interface %s: %v", vlan, err) - } - return nil -} - -func vlanName(deviceName string, vlanID string) string { - // MAX IFSIZE 16; got to truncate it to add the vlan suffix - if len(deviceName)+len(vlanID)+1 > 16 { - deviceName = deviceName[:len(deviceName)-len(vlanID)-1] - } - return fmt.Sprintf("%s.%s", deviceName, vlanID) -} diff --git a/test/e2e/multi_node_zones_interconnect.go b/test/e2e/multi_node_zones_interconnect.go index 5737ec3680..d4cc5356b2 100644 --- a/test/e2e/multi_node_zones_interconnect.go +++ b/test/e2e/multi_node_zones_interconnect.go @@ -9,6 +9,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -87,13 +88,11 @@ func checkPodsInterconnectivity(clientPod, serverPod *v1.Pod, namespace string, return nil } -var _ = ginkgo.Describe("Multi node zones interconnect", func() { +var _ = ginkgo.Describe("Multi node zones interconnect", feature.Interconnect, func() { const ( - serverPodNodeName = "ovn-control-plane" - serverPodName = "server-pod" - clientPodNodeName = "ovn-worker3" - clientPodName = "client-pod" + serverPodName = "server-pod" + clientPodName = "client-pod" ) fr := wrappedTestFramework("multi-node-zones") @@ -119,13 +118,13 @@ var _ = ginkgo.Describe("Multi node zones interconnect", func() { len(nodes.Items)) } - serverPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), serverPodNodeName, metav1.GetOptions{}) + serverPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) if err != nil { e2eskipper.Skipf( "Test requires node with the name %s", serverPodName, ) } - clientPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), 
clientPodNodeName, metav1.GetOptions{}) + clientPodNode, err = cs.CoreV1().Nodes().Get(context.TODO(), nodes.Items[1].Name, metav1.GetOptions{}) if err != nil { e2eskipper.Skipf( "Test requires node with the name %s", clientPodName, @@ -140,7 +139,7 @@ var _ = ginkgo.Describe("Multi node zones interconnect", func() { if serverPodNodeZone == clientPodNodeZone { e2eskipper.Skipf( - "Test requires nodes %s and %s are in different zones", serverPodNodeName, clientPodNodeName, + "Test requires nodes %s and %s are in different zones", nodes.Items[0].Name, nodes.Items[1].Name, ) } }) @@ -149,13 +148,13 @@ var _ = ginkgo.Describe("Multi node zones interconnect", func() { // Create a server pod on zone - zone-1 cmd := httpServerContainerCmd(8000) serverPod := e2epod.NewAgnhostPod(fr.Namespace.Name, serverPodName, nil, nil, nil, cmd...) - serverPod.Spec.NodeName = serverPodNodeName + serverPod.Spec.NodeName = serverPodNode.Name e2epod.NewPodClient(fr).CreateSync(context.TODO(), serverPod) // Create a client pod on zone - zone-2 cmd = []string{} clientPod := e2epod.NewAgnhostPod(fr.Namespace.Name, clientPodName, nil, nil, nil, cmd...) 
- clientPod.Spec.NodeName = clientPodNodeName + clientPod.Spec.NodeName = clientPodNode.Name e2epod.NewPodClient(fr).CreateSync(context.TODO(), clientPod) ginkgo.By("asserting the *client* pod can contact the server pod exposed endpoint") @@ -163,7 +162,7 @@ var _ = ginkgo.Describe("Multi node zones interconnect", func() { framework.ExpectNoError(err, "failed to check pods interconnectivity") // Change the zone of client-pod node to that of server-pod node - s := fmt.Sprintf("Changing the client-pod node %s zone from %s to %s", clientPodNodeName, clientPodNodeZone, serverPodNodeZone) + s := fmt.Sprintf("Changing the client-pod node %s zone from %s to %s", clientPodNode.Name, clientPodNodeZone, serverPodNodeZone) ginkgo.By(s) err = changeNodeZone(clientPodNode, serverPodNodeZone, cs) framework.ExpectNoError(err, "failed to change node zone") @@ -173,7 +172,7 @@ var _ = ginkgo.Describe("Multi node zones interconnect", func() { framework.ExpectNoError(err, "failed to check pods interconnectivity") // Change back the zone of client-pod node - s = fmt.Sprintf("Changing back the client-pod node %s zone from %s to %s", clientPodNodeName, serverPodNodeZone, clientPodNodeZone) + s = fmt.Sprintf("Changing back the client-pod node %s zone from %s to %s", clientPodNode.Name, serverPodNodeZone, clientPodNodeZone) ginkgo.By(s) err = changeNodeZone(clientPodNode, clientPodNodeZone, cs) framework.ExpectNoError(err, "failed to change node zone") diff --git a/test/e2e/multicast.go b/test/e2e/multicast.go index f90cf37b5f..d9b2bc3d9c 100644 --- a/test/e2e/multicast.go +++ b/test/e2e/multicast.go @@ -8,6 +8,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,7 +25,7 @@ type nodeInfo struct { nodeIP string } -var _ = ginkgo.Describe("Multicast", func() { +var _ = ginkgo.Describe("Multicast", feature.Multicast, func() { fr := 
wrappedTestFramework("multicast") diff --git a/test/e2e/multihoming.go b/test/e2e/multihoming.go index 49884ab548..e82255fc57 100644 --- a/test/e2e/multihoming.go +++ b/test/e2e/multihoming.go @@ -5,17 +5,14 @@ import ( "errors" "fmt" "net/netip" - "os" - "os/exec" - "strconv" "strings" "time" "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" - "github.com/docker/docker/client" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,6 +27,10 @@ import ( ipgenerator "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/generator/ip" util "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/images" + "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" ) const ( @@ -37,7 +38,7 @@ const ( nodeHostnameKey = "kubernetes.io/hostname" ) -var _ = Describe("Multi Homing", func() { +var _ = Describe("Multi Homing", feature.MultiHoming, func() { const ( podName = "tinypod" secondaryNetworkCIDR = "10.128.0.0/16" @@ -54,9 +55,10 @@ var _ = Describe("Multi Homing", func() { f := wrappedTestFramework("multi-homing") var ( - cs clientset.Interface - nadClient nadclient.K8sCniCncfIoV1Interface - mnpClient mnpclient.K8sCniCncfIoV1beta1Interface + cs clientset.Interface + nadClient nadclient.K8sCniCncfIoV1Interface + mnpClient mnpclient.K8sCniCncfIoV1beta1Interface + providerCtx infraapi.Context ) BeforeEach(func() { @@ -67,6 +69,7 @@ var _ = Describe("Multi Homing", func() { Expect(err).NotTo(HaveOccurred()) mnpClient, err = mnpclient.NewForConfig(f.ClientConfig()) Expect(err).NotTo(HaveOccurred()) + providerCtx = infraprovider.Get().NewTestContext() }) Context("A single pod with an OVN-K secondary network", func() { @@ -78,8 +81,10 @@ 
var _ = Describe("Multi Homing", func() { if netConfig.topology == "localnet" { By("applying ovs bridge mapping") - Expect(setBridgeMappings(cs, defaultNetworkBridgeMapping(), bridgeMapping(netConfig.networkName, secondaryBridge))).NotTo(HaveOccurred()) - ginkgo.DeferCleanup(setBridgeMappings, cs, defaultNetworkBridgeMapping()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) } By("creating the attachment configuration") @@ -271,18 +276,26 @@ var _ = Describe("Multi Homing", func() { ) const ( - clientPodName = "client-pod" - clientIPOffset = 100 - serverIPOffset = 102 - port = 9000 - workerOneNodeName = "ovn-worker" - workerTwoNodeName = "ovn-worker2" + clientPodName = "client-pod" + clientIPOffset = 100 + serverIPOffset = 102 + port = 9000 ) - ginkgo.DescribeTable("attached to a localnet network mapped to breth0", - - func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration) { + ginkgo.DescribeTable("attached to a localnet network mapped to external primary interface bridge", //nolint:lll + func(netConfigParams networkAttachmentConfigParams, clientPodConfig, serverPodConfig podConfiguration, isCollocatedPods bool) { + By("Get two scheduable nodes and ensure client and server are located on distinct Nodes") + nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), f.ClientSet, 2) + framework.ExpectNoError(err, "2 scheduable nodes are required") + Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "cluster should have at least 2 nodes") + if isCollocatedPods { + clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + } else { + clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} + serverPodConfig.nodeSelector = 
map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} + } netConfig := newNetworkAttachmentConfig(networkAttachmentConfigParams{ name: secondaryNetworkName, namespace: f.Namespace.Name, @@ -296,17 +309,14 @@ var _ = Describe("Multi Homing", func() { } By("setting up the localnet underlay") - pods := ovsPods(cs) - Expect(pods).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(pods, defaultOvsBridge)).To(Succeed()) - }() - Expect(setupUnderlay(pods, defaultOvsBridge, "", netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + BridgeName: deploymentconfig.Get().ExternalBridgeName(), + LogicalNetworkName: netConfig.networkName, + })).To(Succeed()) nad := generateNAD(netConfig) By(fmt.Sprintf("creating the attachment configuration: %v\n", nad)) - _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( + _, err = nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), nad, metav1.CreateOptions{}, @@ -332,17 +342,31 @@ var _ = Describe("Multi Homing", func() { kickstartPod(cs, clientPodConfig) // Check that the client pod can reach the server pod on the server localnet interface - serverIPs, err := podIPsForAttachment(cs, f.Namespace.Name, serverPod.GetName(), netConfig.name) + var serverIPs []string + if serverPodConfig.hostNetwork { + serverIPs, err = podIPsFromStatus(cs, serverPodConfig.namespace, serverPodConfig.name) + } else { + serverIPs, err = podIPsForAttachment(cs, serverPod.Namespace, serverPod.Name, netConfig.name) + + } Expect(err).NotTo(HaveOccurred()) + for _, serverIP := range serverIPs { By(fmt.Sprintf("asserting the *client* can contact the server pod exposed endpoint: %q on port %q", serverIP, port)) + curlArgs := []string{} + pingArgs := []string{} + if clientPodConfig.attachments != nil { + // When the client is attached to a localnet, send probes from the localnet interface + curlArgs 
= []string{"--interface", "net1"} + pingArgs = []string{"-I", "net1"} + } Eventually(func() error { - return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port) + return reachServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, port, curlArgs...) }, 2*time.Minute, 6*time.Second).Should(Succeed()) By(fmt.Sprintf("asserting the *client* can ping the server pod exposed endpoint: %q", serverIP)) Eventually(func() error { - return pingServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP) + return pingServerPodFromClient(cs, serverPodConfig, clientPodConfig, serverIP, pingArgs...) }, 2*time.Minute, 6*time.Second).Should(Succeed()) } }, @@ -354,7 +378,6 @@ var _ = Describe("Multi Homing", func() { }, podConfiguration{ // client on default network name: clientPodName, - nodeSelector: map[string]string{nodeHostnameKey: workerOneNodeName}, isPrivileged: true, }, podConfiguration{ // server attached to localnet secondary network @@ -363,9 +386,9 @@ var _ = Describe("Multi Homing", func() { }}, name: podName, containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, needsIPRequestFromHostSubnet: true, // will override attachments above with an IPRequest }, + false, // scheduled on distinct Nodes Label("BUG", "OCPBUGS-43004"), ), ginkgo.Entry( @@ -376,7 +399,6 @@ var _ = Describe("Multi Homing", func() { }, podConfiguration{ // client on default network name: clientPodName + "-same-node", - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, isPrivileged: true, }, podConfiguration{ // server attached to localnet secondary network @@ -385,23 +407,65 @@ var _ = Describe("Multi Homing", func() { }}, name: podName, containerCmd: httpServerContainerCmd(port), - nodeSelector: map[string]string{nodeHostnameKey: workerTwoNodeName}, needsIPRequestFromHostSubnet: true, }, + true, // collocated on same Node Label("BUG", "OCPBUGS-43004"), ), + ginkgo.Entry( + 
"can reach a host-networked pod on a different node", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + }, + podConfiguration{ // client on localnet + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: clientPodName, + isPrivileged: true, + needsIPRequestFromHostSubnet: true, + }, + podConfiguration{ // server on default network, pod is host-networked + name: podName, + containerCmd: httpServerContainerCmd(port), + hostNetwork: true, + }, + false, // not collocated on same node + Label("STORY", "SDN-5345"), + ), + ginkgo.Entry( + "can reach a host-networked pod on the same node", + networkAttachmentConfigParams{ + name: secondaryNetworkName, + topology: "localnet", + }, + podConfiguration{ // client on localnet + attachments: []nadapi.NetworkSelectionElement{{ + Name: secondaryNetworkName, + }}, + name: clientPodName, + isPrivileged: true, + needsIPRequestFromHostSubnet: true, + }, + podConfiguration{ // server on default network, pod is host-networked + name: podName, + containerCmd: httpServerContainerCmd(port), + hostNetwork: true, + }, + true, // collocated on same node + Label("STORY", "SDN-5345"), + ), ) }) Context("multiple pods connected to the same OVN-K secondary network", func() { const ( - workerOneNodeName = "ovn-worker" - workerTwoNodeName = "ovn-worker2" - clientPodName = "client-pod" - nodeHostnameKey = "kubernetes.io/hostname" - port = 9000 - clientIP = "192.168.200.10/24" - staticServerIP = "192.168.200.20/24" + clientPodName = "client-pod" + nodeHostnameKey = "kubernetes.io/hostname" + port = 9000 + clientIP = "192.168.200.10/24" + staticServerIP = "192.168.200.20/24" ) ginkgo.It("eventually configures pods that were added to an already existing network before the nad", func() { @@ -483,16 +547,10 @@ var _ = Describe("Multi Homing", func() { serverPodConfig.namespace = f.Namespace.Name if netConfig.topology == "localnet" { - By("setting up the localnet underlay") - 
nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) } By("creating the attachment configuration") @@ -506,6 +564,7 @@ var _ = Describe("Multi Homing", func() { By("Get two scheduable nodes and schedule client and server to be on distinct Nodes") nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.Background(), f.ClientSet, 2) framework.ExpectNoError(err, "2 scheduable nodes are required") + Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "cluster should have at least 2 nodes") clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[0].GetName()} serverPodConfig.nodeSelector = map[string]string{nodeHostnameKey: nodes.Items[1].GetName()} @@ -837,19 +896,17 @@ var _ = Describe("Multi Homing", func() { Context("localnet OVN-K secondary network", func() { const ( - clientPodName = "client-pod" - nodeHostnameKey = "kubernetes.io/hostname" - servicePort uint16 = 9000 - dockerNetworkName = "underlay" - underlayServiceIP = "60.128.0.1" - secondaryInterfaceName = "eth1" - expectedOriginalMTU = 1200 + clientPodName = "client-pod" + nodeHostnameKey = "kubernetes.io/hostname" + servicePort uint16 = 9000 + dockerNetworkName = "underlay" + underlayServiceIP = "60.128.0.1" + expectedOriginalMTU = 1200 ) - var netConfig networkAttachmentConfig - var nodes []v1.Pod - var underlayBridgeName string - var cmdWebServer *exec.Cmd + var ( + netConfig networkAttachmentConfig + ) underlayIP := underlayServiceIP + "/24" Context("with a service running on the underlay", func() { @@ -866,33 +923,29 @@ var _ = 
Describe("Multi Homing", func() { }) By("setting up the localnet underlay") - nodes = ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) - }) - - BeforeEach(func() { - By("adding IP to the underlay docker bridge") - cli, err := client.NewClientWithOpts(client.FromEnv) - Expect(err).NotTo(HaveOccurred()) - - gatewayIP, err := getNetworkGateway(cli, dockerNetworkName) - Expect(err).NotTo(HaveOccurred()) - - underlayBridgeName, err = findInterfaceByIP(gatewayIP) - Expect(err).NotTo(HaveOccurred()) - - cmd := exec.Command("sudo", "ip", "addr", "add", underlayIP, "dev", underlayBridgeName) - cmd.Stderr = os.Stderr - err = cmd.Run() - Expect(err).NotTo(HaveOccurred()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) }) BeforeEach(func() { By("starting a service, connected to the underlay") - cmdWebServer = exec.Command("python3", "-m", "http.server", "--bind", underlayServiceIP, strconv.Itoa(int(servicePort))) - cmdWebServer.Stderr = os.Stderr - Expect(cmdWebServer.Start()).NotTo(HaveOccurred(), "failed to create web server, port might be busy") + providerCtx = infraprovider.Get().NewTestContext() + + underlayNetwork, err := infraprovider.Get().GetNetwork(dockerNetworkName) + Expect(err).NotTo(HaveOccurred(), "must get underlay network") + externalContainerName := f.Namespace.Name + "-web-server" + serviceContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.AgnHost(), + Network: underlayNetwork, + Entrypoint: "bash", + CmdArgs: []string{"-c", fmt.Sprintf("ip a add %s/24 dev eth0 && ./agnhost netexec --http-port=%d", underlayServiceIP, servicePort)}, + ExtPort: servicePort, + } + _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) + Expect(err).NotTo(HaveOccurred(), "must create external container 1") 
}) BeforeEach(func() { @@ -905,23 +958,6 @@ var _ = Describe("Multi Homing", func() { Expect(err).NotTo(HaveOccurred()) }) - AfterEach(func() { - err := cmdWebServer.Process.Kill() - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - cmd := exec.Command("sudo", "ip", "addr", "del", underlayIP, "dev", underlayBridgeName) - cmd.Stderr = os.Stderr - err := cmd.Run() - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }) - It("correctly sets the MTU on the pod", func() { Eventually(func() error { clientPodConfig := podConfiguration{ @@ -952,6 +988,7 @@ var _ = Describe("Multi Homing", func() { By("asserting the *client* pod can contact the underlay service") Expect(connectToServer(clientPodConfig, underlayServiceIP, servicePort)).To(Succeed()) + }) Context("and networkAttachmentDefinition is modified", func() { @@ -1049,7 +1086,10 @@ var _ = Describe("Multi Homing", func() { Context("and the service connected to the underlay is reconfigured to connect to the new VLAN-ID", func() { BeforeEach(func() { - Expect(ovsRemoveSwitchPort(nodes, secondaryInterfaceName, newLocalnetVLANID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: newLocalnetVLANID, + })).To(Succeed(), "configuring the OVS bridge with new localnet vlan id") }) It("can now communicate over a localnet secondary network from pod to the underlay service", func() { @@ -1239,9 +1279,6 @@ var _ = Describe("Multi Homing", func() { Context("with a trunked configuration", func() { const vlanID = 20 BeforeEach(func() { - nodes = ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - // we are setting up the bridge in trunked mode by not // specifying a particular VLAN ID on the network conf netConfig = newNetworkAttachmentConfig( @@ -1254,32 +1291,35 @@ var _ = Describe("Multi Homing", func() { }) By("setting up the 
localnet underlay with a trunked configuration") - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed(), "configuring the OVS bridge") - - By(fmt.Sprintf("creating a VLAN interface on top of the bridge connecting the cluster nodes with IP: %s", underlayIP)) - cli, err := client.NewClientWithOpts(client.FromEnv) - Expect(err).NotTo(HaveOccurred()) - - gatewayIP, err := getNetworkGateway(cli, dockerNetworkName) - Expect(err).NotTo(HaveOccurred()) - - underlayBridgeName, err = findInterfaceByIP(gatewayIP) - Expect(err).NotTo(HaveOccurred()) - Expect(createVLANInterface(underlayBridgeName, strconv.Itoa(vlanID), &underlayIP)).To( - Succeed(), - "create a VLAN interface on the bridge interconnecting the cluster nodes", - ) - - By("starting a service, connected to the underlay") - cmdWebServer = exec.Command("python3", "-m", "http.server", "--bind", underlayServiceIP, strconv.Itoa(port)) - cmdWebServer.Stderr = os.Stderr - Expect(cmdWebServer.Start()).NotTo(HaveOccurred(), "failed to create web server, port might be busy") - }) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed(), "configuring the OVS bridge") + + By("starting a service, connected to the underlay over a VLAN") + providerCtx = infraprovider.Get().NewTestContext() + + ifName := "eth0" + vlanName := fmt.Sprintf("%s.%d", ifName, vlanID) + underlayNetwork, err := infraprovider.Get().GetNetwork(dockerNetworkName) + Expect(err).NotTo(HaveOccurred(), "must get underlay network") + externalContainerName := f.Namespace.Name + "-web-server" + serviceContainerSpec := infraapi.ExternalContainer{ + Name: externalContainerName, + Image: images.AgnHost(), + Network: underlayNetwork, + Entrypoint: "bash", + ExtPort: servicePort, + CmdArgs: []string{"-c", fmt.Sprintf(` +ip link add link %[1]s name %[2]s type vlan id %[3]d +ip link set dev %[2]s up +ip a add 
%[4]s/24 dev %[2]s +./agnhost netexec --http-port=%[5]d +`, ifName, vlanName, vlanID, underlayServiceIP, servicePort)}, + } + _, err = providerCtx.CreateExternalContainer(serviceContainerSpec) + Expect(err).NotTo(HaveOccurred(), "must create external container 1") - AfterEach(func() { - Expect(cmdWebServer.Process.Kill()).NotTo(HaveOccurred(), "kill the python webserver") - Expect(deleteVLANInterface(underlayBridgeName, strconv.Itoa(vlanID))).NotTo(HaveOccurred(), "remove the underlay physical configuration") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed(), "tear down the localnet underlay") }) It("the same bridge mapping can be shared by a separate VLAN by using the physical network name attribute", func() { @@ -1312,6 +1352,7 @@ var _ = Describe("Multi Homing", func() { By(fmt.Sprintf("asserting the *client* pod can contact the underlay service with IP %q on the separate vlan", underlayIP)) Expect(connectToServer(clientPodConfig, underlayServiceIP, servicePort)).To(Succeed()) + }) }) }) @@ -1359,15 +1400,10 @@ var _ = Describe("Multi Homing", func() { if netConfig.topology == "localnet" { By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) } Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -1786,14 +1822,10 @@ var _ = Describe("Multi Homing", func() { netConfig := newNetworkAttachmentConfig(netConfigParams) By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - 
By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -1918,14 +1950,10 @@ var _ = Describe("Multi Homing", func() { netConfig := newNetworkAttachmentConfig(netConfigParams) By("setting up the localnet underlay") - nodes := ovsPods(cs) - Expect(nodes).NotTo(BeEmpty()) - defer func() { - By("tearing down the localnet underlay") - Expect(teardownUnderlay(nodes, secondaryBridge)).To(Succeed()) - }() - const secondaryInterfaceName = "eth1" - Expect(setupUnderlay(nodes, secondaryBridge, secondaryInterfaceName, netConfig.networkName, netConfig.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + LogicalNetworkName: netConfig.networkName, + VlanID: netConfig.vlanID, + })).To(Succeed()) Expect(createNads(f, nadClient, extraNamespace, netConfig)).NotTo(HaveOccurred()) @@ -2220,18 +2248,3 @@ func addIPRequestToPodConfig(cs clientset.Interface, podConfig *podConfiguration } return nil } - -func setBridgeMappings(cs clientset.Interface, mappings ...BridgeMapping) error { - pods := ovsPods(cs) - if len(pods) == 0 { - return fmt.Errorf("pods list is empty") - } - - for _, pods := range pods { - if err := configureBridgeMappings(pods.Namespace, pods.Name, mappings...); err != nil { - return err - } - } - - return nil -} diff --git a/test/e2e/multihoming_utils.go b/test/e2e/multihoming_utils.go index dea72dce28..d7921cef00 100644 --- a/test/e2e/multihoming_utils.go +++ b/test/e2e/multihoming_utils.go @@ -29,24 +29,33 @@ func netCIDR(netCIDR string, netPrefixLengthPerNode int) string { return 
fmt.Sprintf("%s/%d", netCIDR, netPrefixLengthPerNode) } -// takes ipv4 and ipv6 cidrs and returns the correct type for the cluster under test -func correctCIDRFamily(ipv4CIDR, ipv6CIDR string) string { - return strings.Join(selectCIDRs(ipv4CIDR, ipv6CIDR), ",") +func joinCIDRs(cidrs ...string) string { + return strings.Join(cidrs, ",") } -// takes ipv4 and ipv6 cidrs and returns the correct type for the cluster under test -func selectCIDRs(ipv4CIDR, ipv6CIDR string) []string { - // dual stack cluster - if isIPv6Supported() && isIPv4Supported() { - return []string{ipv4CIDR, ipv6CIDR} +func splitCIDRs(cidrs string) []string { + if cidrs == "" { + return []string{} } - // is an ipv6 only cluster - if isIPv6Supported() { - return []string{ipv6CIDR} + return strings.Split(cidrs, ",") +} + +func filterCIDRsAndJoin(cs clientset.Interface, cidrs string) string { + if cidrs == "" { + return "" // we may not always set CIDR - i.e. CDN } + return joinCIDRs(filterCIDRs(cs, splitCIDRs(cidrs)...)...) +} - //ipv4 only cluster - return []string{ipv4CIDR} +func filterCIDRs(cs clientset.Interface, cidrs ...string) []string { + var supportedCIDRs []string + for _, cidr := range cidrs { + if !isCIDRIPFamilySupported(cs, cidr) { + continue + } + supportedCIDRs = append(supportedCIDRs, cidr) + } + return supportedCIDRs } func getNetCIDRSubnet(netCIDR string) (string, error) { @@ -161,6 +170,7 @@ type podConfiguration struct { isPrivileged bool labels map[string]string requiresExtraNamespace bool + hostNetwork bool needsIPRequestFromHostSubnet bool } @@ -171,6 +181,7 @@ func generatePodSpec(config podConfiguration) *v1.Pod { } podSpec.Spec.NodeSelector = config.nodeSelector podSpec.Labels = config.labels + podSpec.Spec.HostNetwork = config.hostNetwork if config.isPrivileged { podSpec.Spec.Containers[0].SecurityContext.Privileged = ptr.To(true) } else { @@ -253,17 +264,19 @@ func inRange(cidr string, ip string) error { return fmt.Errorf("ip [%s] is NOT in range %s", ip, cidr) } -func 
connectToServer(clientPodConfig podConfiguration, serverIP string, port uint16) error { - _, err := e2ekubectl.RunKubectl( - clientPodConfig.namespace, +func connectToServer(clientPodConfig podConfiguration, serverIP string, port uint16, args ...string) error { + target := net.JoinHostPort(serverIP, fmt.Sprintf("%d", port)) + baseArgs := []string{ "exec", clientPodConfig.name, "--", "curl", "--connect-timeout", "2", - net.JoinHostPort(serverIP, fmt.Sprintf("%d", port)), - ) + } + baseArgs = append(baseArgs, args...) + + _, err := e2ekubectl.RunKubectl(clientPodConfig.namespace, append(baseArgs, target)...) return err } @@ -308,16 +321,19 @@ func getSecondaryInterfaceMTU(clientPodConfig podConfiguration) (int, error) { return mtu, nil } -func pingServer(clientPodConfig podConfiguration, serverIP string) error { - _, err := e2ekubectl.RunKubectl( - clientPodConfig.namespace, +func pingServer(clientPodConfig podConfiguration, serverIP string, args ...string) error { + baseArgs := []string{ "exec", clientPodConfig.name, "--", "ping", "-c", "1", // send one ICMP echo request "-W", "2", // timeout after 2 seconds if no response - serverIP) + } + baseArgs = append(baseArgs, args...) + + _, err := e2ekubectl.RunKubectl(clientPodConfig.namespace, append(baseArgs, serverIP)...) 
+ return err } @@ -381,6 +397,18 @@ func podIPForAttachment(k8sClient clientset.Interface, podNamespace string, podN return ips[ipIndex], nil } +func podIPsFromStatus(k8sClient clientset.Interface, podNamespace string, podName string) ([]string, error) { + pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + podIPs := make([]string, 0, len(pod.Status.PodIPs)) + for _, podIP := range pod.Status.PodIPs { + podIPs = append(podIPs, podIP.IP) + } + return podIPs, nil +} + func allowedClient(podName string) string { return "allowed-" + podName } @@ -610,31 +638,27 @@ func allowedTCPPortsForPolicy(allowPorts ...int) []mnpapi.MultiNetworkPolicyPort return portAllowlist } -<<<<<<< HEAD -func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort int) error { -======= -func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort uint16) error { ->>>>>>> downstream/release-4.20 +func reachServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, serverPort uint16, args ...string) error { updatedPod, err := cs.CoreV1().Pods(serverConfig.namespace).Get(context.Background(), serverConfig.name, metav1.GetOptions{}) if err != nil { return err } if updatedPod.Status.Phase == v1.PodRunning { - return connectToServer(clientConfig, serverIP, serverPort) + return connectToServer(clientConfig, serverIP, serverPort, args...) } return fmt.Errorf("pod not running. 
/me is sad") } -func pingServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string) error { +func pingServerPodFromClient(cs clientset.Interface, serverConfig podConfiguration, clientConfig podConfiguration, serverIP string, args ...string) error { updatedPod, err := cs.CoreV1().Pods(serverConfig.namespace).Get(context.Background(), serverConfig.name, metav1.GetOptions{}) if err != nil { return err } if updatedPod.Status.Phase == v1.PodRunning { - return pingServer(clientConfig, serverIP) + return pingServer(clientConfig, serverIP, args...) } return fmt.Errorf("pod not running. /me is sad") @@ -680,3 +704,39 @@ func getNetworkGateway(cli *client.Client, networkName string) (string, error) { return "", fmt.Errorf("Gateway not found for network %q", networkName) } + +func getPodAnnotationForAttachment(pod *v1.Pod, attachmentName string) (PodAnnotation, error) { + podAnnotation, err := unmarshalPodAnnotation(pod.Annotations, attachmentName) + if err != nil { + return PodAnnotation{}, fmt.Errorf("failed to unmarshall annotations for pod %q: %v", pod.Name, err) + } + + return *podAnnotation, nil +} + +func getPodAnnotationIPsForAttachment(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string) ([]*net.IPNet, error) { + pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + podAnnotation, err := getPodAnnotationForAttachment(pod, attachmentName) + if err != nil { + return nil, err + } + return podAnnotation.IPs, nil +} + +// podIPsForNetworkByIndex returns the v4 or v6 IPs for a pod on the UDN +func getPodAnnotationIPsForAttachmentByIndex(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string, index int) (string, error) { + ipnets, err := getPodAnnotationIPsForAttachment(k8sClient, podNamespace, podName, attachmentName) + if err != nil { + return 
"", err + } + if index >= len(ipnets) { + return "", fmt.Errorf("no IP at index %d for attachment %s on pod %s", index, attachmentName, namespacedName(podNamespace, podName)) + } + if len(ipnets) > 2 { + return "", fmt.Errorf("attachment for network %q with more than two IPs", attachmentName) + } + return ipnets[index].IP.String(), nil +} diff --git a/test/e2e/network_segmentation.go b/test/e2e/network_segmentation.go index a3105f2ab0..bb667b2d94 100644 --- a/test/e2e/network_segmentation.go +++ b/test/e2e/network_segmentation.go @@ -10,6 +10,7 @@ import ( "strings" "time" + udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" @@ -20,6 +21,7 @@ import ( "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -44,7 +46,7 @@ const openDefaultPortsAnnotation = "k8s.ovn.org/open-default-ports" const RequiredUDNNamespaceLabel = "k8s.ovn.org/primary-user-defined-network" const OvnPodAnnotationName = "k8s.ovn.org/pod-networks" -var _ = Describe("Network Segmentation", func() { +var _ = Describe("Network Segmentation", feature.NetworkSegmentation, func() { f := wrappedTestFramework("network-segmentation") // disable automatic namespace creation, we need to add the required UDN label f.SkipNamespaceCreation = true @@ -89,6 +91,7 @@ var _ = Describe("Network Segmentation", func() { func(netConfig *networkAttachmentConfigParams) { By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) By("creating a pod on the udn namespace") @@ -125,7 +128,7 @@ var _ = Describe("Network Segmentation", func() { 
&networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), @@ -133,7 +136,7 @@ var _ = Describe("Network Segmentation", func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), @@ -156,6 +159,7 @@ var _ = Describe("Network Segmentation", func() { By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) By("creating client/server pods") @@ -171,7 +175,7 @@ var _ = Describe("Network Segmentation", func() { for i, cidr := range strings.Split(netConfig.cidr, ",") { if cidr != "" { By("asserting the server pod has an IP from the configured range") - serverIP, err = podIPsForUserDefinedPrimaryNetwork( + serverIP, err = getPodAnnotationIPsForAttachmentByIndex( cs, f.Namespace.Name, serverPodConfig.name, @@ -197,7 +201,7 @@ var _ = Describe("Network Segmentation", func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -215,7 +219,7 @@ var _ = Describe("Network Segmentation", func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -267,6 +271,7 @@ var _ = Describe("Network Segmentation", func() { By("creating the 
network") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(f.ClientSet, netConfigParams.cidr) Expect(createNetworkFn(netConfigParams)).To(Succeed()) udnPodConfig.namespace = f.Namespace.Name @@ -384,52 +389,50 @@ var _ = Describe("Network Segmentation", func() { }, 10*time.Second, 1*time.Second).Should(BeTrue()) Expect(udnPod.Status.ContainerStatuses[0].RestartCount).To(Equal(int32(0))) - if !isUDNHostIsolationDisabled() { - By("checking default network hostNetwork pod and non-kubelet host process can't reach the UDN pod") - hostNetPod, err := createPod(f, "host-net-pod", nodeName, - defaultNetNamespace, []string{}, nil, func(pod *v1.Pod) { - pod.Spec.HostNetwork = true - }) - Expect(err).NotTo(HaveOccurred()) + By("checking default network hostNetwork pod and non-kubelet host process can't reach the UDN pod") + hostNetPod, err := createPod(f, "host-net-pod", nodeName, + defaultNetNamespace, []string{}, nil, func(pod *v1.Pod) { + pod.Spec.HostNetwork = true + }) + Expect(err).NotTo(HaveOccurred()) - // positive check for reachable default network pod - for _, destIP := range []string{defaultIPv4, defaultIPv6} { - if destIP == "" { - continue - } - By("checking the default network hostNetwork can reach default pod on IP " + destIP) - Eventually(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetDefaultPort) == nil - }).Should(BeTrue()) - By("checking the non-kubelet host process can reach default pod on IP " + destIP) - Eventually(func() bool { - _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ - "curl", "--connect-timeout", "2", - net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetDefaultPort)), + // positive check for reachable default network pod + for _, destIP := range []string{defaultIPv4, defaultIPv6} { + if destIP == "" { + continue + } + By("checking the default network hostNetwork can reach default pod on IP " + 
destIP) + Eventually(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetDefaultPort) == nil + }).Should(BeTrue()) + By("checking the non-kubelet host process can reach default pod on IP " + destIP) + Eventually(func() bool { + _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ + "curl", "--connect-timeout", "2", + net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetDefaultPort)), }) - return err == nil - }).Should(BeTrue()) + return err == nil + }).Should(BeTrue()) + } + // negative check for UDN pod + for _, destIP := range []string{udnIPv4, udnIPv6} { + if destIP == "" { + continue } - // negative check for UDN pod - for _, destIP := range []string{udnIPv4, udnIPv6} { - if destIP == "" { - continue - } - By("checking the default network hostNetwork pod can't reach UDN pod on IP " + destIP) - Consistently(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil - }, 5*time.Second).Should(BeTrue()) + By("checking the default network hostNetwork pod can't reach UDN pod on IP " + destIP) + Consistently(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil + }, 5*time.Second).Should(BeTrue()) - By("checking the non-kubelet host process can't reach UDN pod on IP " + destIP) - Consistently(func() bool { - _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ - "curl", "--connect-timeout", "2", - net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetPort)), + By("checking the non-kubelet host process can't reach UDN pod on IP " + destIP) + Consistently(func() bool { + _, err := infraprovider.Get().ExecK8NodeCommand(nodeName, []string{ + "curl", "--connect-timeout", "2", + net.JoinHostPort(destIP, fmt.Sprintf("%d", podClusterNetPort)), }) - return err != nil - }, 
5*time.Second).Should(BeTrue()) - } + return err != nil + }, 5*time.Second).Should(BeTrue()) } By("asserting UDN pod can reach the kapi service in the default network") @@ -495,7 +498,7 @@ var _ = Describe("Network Segmentation", func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -510,7 +513,7 @@ var _ = Describe("Network Segmentation", func() { &networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -564,11 +567,12 @@ var _ = Describe("Network Segmentation", func() { netConfig := &networkAttachmentConfigParams{ topology: topology, - cidr: correctCIDRFamily(userDefinedv4Subnet, userDefinedv6Subnet), + cidr: joinCIDRs(userDefinedv4Subnet, userDefinedv6Subnet), role: "primary", namespace: namespace, name: network, } + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) // update the name because createNetworkFn may mutate the netConfig.name @@ -606,7 +610,7 @@ var _ = Describe("Network Segmentation", func() { By("creating pod " + podConfig.name + " in " + podConfig.namespace) pod := runUDNPod(cs, podConfig.namespace, podConfig, nil) pods = append(pods, pod) - podIP, err := podIPsForUserDefinedPrimaryNetwork( + podIP, err := getPodAnnotationIPsForAttachmentByIndex( cs, pod.Namespace, pod.Name, @@ -713,16 +717,18 @@ var _ = Describe("Network Segmentation", func() { name: "tenant-blue", namespace: f.Namespace.Name, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, 
userDefinedNetworkIPv6Subnet), role: "primary", } + netConfig1.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig1.cidr) netConfig2 := networkAttachmentConfigParams{ name: "blue", namespace: f.Namespace.Name + "-tenant", topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", } + netConfig2.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig2.cidr) nodes, err := e2enode.GetBoundedReadySchedulableNodes(context.TODO(), cs, 2) framework.ExpectNoError(err) if len(nodes.Items) < 2 { @@ -786,7 +792,7 @@ var _ = Describe("Network Segmentation", func() { By(fmt.Sprintf("asserting network works in namespace %s", config.namespace)) for i, cidr := range strings.Split(config.cidr, ",") { if cidr != "" { - serverIP, err = podIPsForUserDefinedPrimaryNetwork( + serverIP, err = getPodAnnotationIPsForAttachmentByIndex( cs, config.namespace, serverPodConfig.name, @@ -835,6 +841,7 @@ var _ = Describe("Network Segmentation", func() { DescribeTable("should be able to send multicast UDP traffic between nodes", func(netConfigParams networkAttachmentConfigParams) { ginkgo.By("creating the attachment configuration") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(cs, netConfigParams.cidr) netConfig := newNetworkAttachmentConfig(netConfigParams) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), @@ -847,19 +854,20 @@ var _ = Describe("Network Segmentation", func() { ginkgo.Entry("with primary layer3 UDN", networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: 
correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), ) DescribeTable("should be able to receive multicast IGMP query", func(netConfigParams networkAttachmentConfigParams) { ginkgo.By("creating the attachment configuration") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(cs, netConfigParams.cidr) netConfig := newNetworkAttachmentConfig(netConfigParams) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), @@ -872,15 +880,16 @@ var _ = Describe("Network Segmentation", func() { ginkgo.Entry("with primary layer3 UDN", networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), - role: "primary", - }), - ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ - name: nadName, - topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }), + // TODO: this test is broken, see https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5309 + //ginkgo.Entry("with primary layer2 UDN", networkAttachmentConfigParams{ + // name: nadName, + // topology: "layer2", + // cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + // role: "primary", + //}), ) }) }) @@ -908,7 +917,7 @@ var _ = Describe("Network Segmentation", func() { Expect(err).NotTo(HaveOccurred()) By("create tests UserDefinedNetwork") - cleanup, err := createManifest(defaultNetNamespace.Name, newPrimaryUserDefinedNetworkManifest(testUdnName)) + cleanup, err := createManifest(defaultNetNamespace.Name, newPrimaryUserDefinedNetworkManifest(cs, testUdnName)) DeferCleanup(cleanup) Expect(err).NotTo(HaveOccurred()) 
Eventually(userDefinedNetworkReadyFunc(f.DynamicClient, defaultNetNamespace.Name, testUdnName), 5*time.Second).Should(Not(Succeed())) @@ -1125,13 +1134,13 @@ spec: topology: "layer3", name: primaryNadName, networkName: primaryNadName, - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), })) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create(context.Background(), primaryNetNad, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("create primary network UserDefinedNetwork") - cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(primaryUdnName)) + cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(cs, primaryUdnName)) DeferCleanup(cleanup) Expect(err).NotTo(HaveOccurred()) @@ -1423,14 +1432,14 @@ spec: topology: "layer3", name: primaryNadName, networkName: primaryNadName, - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), })) _, err := nadClient.NetworkAttachmentDefinitions(primaryNetTenantNs).Create(context.Background(), primaryNetNad, metav1.CreateOptions{}) Expect(err).NotTo(HaveOccurred()) By("create primary Cluster UDN CR") cudnName := randomNetworkMetaName() - cleanup, err := createManifest(f.Namespace.Name, newPrimaryClusterUDNManifest(cudnName, testTenantNamespaces...)) + cleanup, err := createManifest(f.Namespace.Name, newPrimaryClusterUDNManifest(cs, cudnName, testTenantNamespaces...)) DeferCleanup(func() { cleanup() _, err := e2ekubectl.RunKubectl("", "delete", "clusteruserdefinednetwork", cudnName, "--wait", fmt.Sprintf("--timeout=%ds", 60)) @@ -1469,7 +1478,7 @@ spec: Name: externalContainerName, Image: images.AgnHost(), Network: providerPrimaryNetwork, - Args: httpServerContainerCmd(uint16(externalContainerPort)), + CmdArgs: 
httpServerContainerCmd(uint16(externalContainerPort)), ExtPort: externalContainerPort, } externalContainer, err = providerCtx.CreateExternalContainer(externalContainerSpec) @@ -1492,6 +1501,7 @@ spec: By("creating the network") netConfigParams.namespace = f.Namespace.Name + netConfigParams.cidr = filterCIDRsAndJoin(f.ClientSet, netConfigParams.cidr) Expect(createNetworkFn(netConfigParams)).To(Succeed()) By("instantiating the client pod") @@ -1519,15 +1529,15 @@ spec: Expect(err).NotTo(HaveOccurred()) framework.Logf("Client pod's annotation for network %s is %v", netConfigParams.name, podAnno) - Expect(podAnno.Routes).To(HaveLen(expectedNumberOfRoutes(*netConfigParams))) + Expect(podAnno.Routes).To(HaveLen(expectedNumberOfRoutes(cs, *netConfigParams))) - assertClientExternalConnectivity(clientPodConfig, externalContainer.GetIPv4(), externalContainer.GetIPv6(), externalContainer.GetPort()) + assertClientExternalConnectivity(cs, clientPodConfig, externalContainer.GetIPv4(), externalContainer.GetIPv6(), externalContainer.GetPort()) }, Entry("by one pod over a layer2 network", &networkAttachmentConfigParams{ name: userDefinedNetworkName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig("client-pod"), @@ -1536,7 +1546,7 @@ spec: &networkAttachmentConfigParams{ name: userDefinedNetworkName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig("client-pod"), @@ -1583,7 +1593,7 @@ spec: BeforeEach(func() { By("create tests UserDefinedNetwork") - cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(testUdnName)) + cleanup, err := createManifest(f.Namespace.Name, newPrimaryUserDefinedNetworkManifest(cs, testUdnName)) 
DeferCleanup(cleanup) Expect(err).NotTo(HaveOccurred()) Eventually(userDefinedNetworkReadyFunc(f.DynamicClient, f.Namespace.Name, testUdnName), 5*time.Second, time.Second).Should(Succeed()) @@ -1644,12 +1654,10 @@ spec: return connectToServer(podConfiguration{namespace: defaultClientPod.Namespace, name: defaultClientPod.Name}, destIP, podClusterNetPort) != nil }, 5*time.Second).Should(BeTrue()) - if !isUDNHostIsolationDisabled() { - By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) - Consistently(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil - }, 5*time.Second).Should(BeTrue()) - } + By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) + Consistently(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil + }, 5*time.Second).Should(BeTrue()) } By("Open UDN pod port") @@ -1694,12 +1702,10 @@ spec: return connectToServer(podConfiguration{namespace: defaultClientPod.Namespace, name: defaultClientPod.Name}, destIP, podClusterNetPort) != nil }, 5*time.Second).Should(BeTrue()) - if !isUDNHostIsolationDisabled() { - By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) - Eventually(func() bool { - return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil - }, 5*time.Second).Should(BeTrue()) - } + By("checking the default hostNetwork pod can't reach UDN pod on IP " + destIP) + Eventually(func() bool { + return connectToServer(podConfiguration{namespace: hostNetPod.Namespace, name: hostNetPod.Name}, destIP, podClusterNetPort) != nil + }, 5*time.Second).Should(BeTrue()) } By("Verify syntax error is reported via event") events, err := cs.CoreV1().Events(udnPod.Namespace).List(context.Background(), metav1.ListOptions{}) @@ -1730,6 +1736,7 
@@ spec: } By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) udnManifest := generateUserDefinedNetworkManifest(&netConfig) cleanup, err := createManifest(netConfig.namespace, udnManifest) Expect(err).ShouldNot(HaveOccurred(), "creating manifest must succeed") @@ -1749,7 +1756,7 @@ spec: clientPodConfig.nodeSelector = map[string]string{nodeHostnameKey: node2Name} runUDNPod(cs, f.Namespace.Name, serverPodConfig, nil) runUDNPod(cs, f.Namespace.Name, clientPodConfig, nil) - serverIP, err := podIPsForUserDefinedPrimaryNetwork(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0) + serverIP, err := getPodAnnotationIPsForAttachmentByIndex(cs, f.Namespace.Name, serverPodConfig.name, namespacedName(f.Namespace.Name, netConfig.name), 0) Expect(err).ShouldNot(HaveOccurred(), "UDN pod IP must be retrieved") By("restart OVNKube node pods on client and server Nodes and ensure connectivity") serverPod := getPod(f, serverPodConfig.name) @@ -1768,7 +1775,7 @@ spec: networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -1786,7 +1793,7 @@ spec: networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -1880,31 +1887,17 @@ func generateLayer3Subnets(cidrs string) []string { // userDefinedNetworkReadyFunc returns a function that checks for the NetworkCreated condition in the provided udn func userDefinedNetworkReadyFunc(client dynamic.Interface, namespace, name string) func() error { - return func() error { - udn, err := 
client.Resource(udnGVR).Namespace(namespace).Get(context.Background(), name, metav1.GetOptions{}, "status") - if err != nil { - return err - } - conditions, err := getConditions(udn) - if err != nil { - return err - } - if len(conditions) == 0 { - return fmt.Errorf("no conditions found in: %v", udn) - } - for _, condition := range conditions { - if condition.Type == "NetworkCreated" && condition.Status == metav1.ConditionTrue { - return nil - } - } - return fmt.Errorf("no NetworkCreated condition found in: %v", udn) - } + return networkReadyFunc(client.Resource(udnGVR).Namespace(namespace), name) } // userDefinedNetworkReadyFunc returns a function that checks for the NetworkCreated condition in the provided cluster udn func clusterUserDefinedNetworkReadyFunc(client dynamic.Interface, name string) func() error { + return networkReadyFunc(client.Resource(clusterUDNGVR), name) +} + +func networkReadyFunc(client dynamic.ResourceInterface, name string) func() error { return func() error { - cUDN, err := client.Resource(clusterUDNGVR).Get(context.Background(), name, metav1.GetOptions{}, "status") + cUDN, err := client.Get(context.Background(), name, metav1.GetOptions{}, "status") if err != nil { return err } @@ -2150,7 +2143,7 @@ spec: ` } -func newPrimaryClusterUDNManifest(name string, targetNamespaces ...string) string { +func newPrimaryClusterUDNManifest(cs clientset.Interface, name string, targetNamespaces ...string) string { targetNs := strings.Join(targetNamespaces, ",") return ` apiVersion: k8s.ovn.org/v1 @@ -2167,7 +2160,7 @@ spec: topology: Layer3 layer3: role: Primary - subnets: ` + generateCIDRforClusterUDN("10.20.100.0/16", "2014:100:200::0/60") + subnets: ` + generateCIDRforClusterUDN(cs, "10.20.100.0/16", "2014:100:200::0/60") } func newL2SecondaryUDNManifest(name string) string { @@ -2184,7 +2177,7 @@ spec: ` } -func newPrimaryUserDefinedNetworkManifest(name string) string { +func newPrimaryUserDefinedNetworkManifest(cs clientset.Interface, name string) 
string { return ` apiVersion: k8s.ovn.org/v1 kind: UserDefinedNetwork @@ -2194,19 +2187,19 @@ spec: topology: Layer3 layer3: role: Primary - subnets: ` + generateCIDRforUDN("10.20.100.0/16", "2014:100:200::0/60") + subnets: ` + generateCIDRforUDN(cs, "10.20.100.0/16", "2014:100:200::0/60") } -func generateCIDRforUDN(v4, v6 string) string { +func generateCIDRforUDN(cs clientset.Interface, v4, v6 string) string { cidr := ` - cidr: ` + v4 + ` ` - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { cidr = ` - cidr: ` + v4 + ` - cidr: ` + v6 + ` ` - } else if isIPv6Supported() { + } else if isIPv6Supported(cs) { cidr = ` - cidr: ` + v6 + ` ` @@ -2214,11 +2207,33 @@ func generateCIDRforUDN(v4, v6 string) string { return cidr } -func generateCIDRforClusterUDN(v4, v6 string) string { +func filterDualStackCIDRs(cs clientset.Interface, cidrs udnv1.DualStackCIDRs) udnv1.DualStackCIDRs { + filteredCIDRs := make(udnv1.DualStackCIDRs, 0, len(cidrs)) + for _, cidr := range cidrs { + if !isCIDRIPFamilySupported(cs, string(cidr)) { + continue + } + filteredCIDRs = append(filteredCIDRs, cidr) + } + return filteredCIDRs +} + +func filterL3Subnets(cs clientset.Interface, l3Subnets []udnv1.Layer3Subnet) []udnv1.Layer3Subnet { + filteredL3Subnets := make([]udnv1.Layer3Subnet, 0, len(l3Subnets)) + for _, l3Subnet := range l3Subnets { + if !isCIDRIPFamilySupported(cs, string(l3Subnet.CIDR)) { + continue + } + filteredL3Subnets = append(filteredL3Subnets, l3Subnet) + } + return filteredL3Subnets +} + +func generateCIDRforClusterUDN(cs clientset.Interface, v4, v6 string) string { cidr := `[{cidr: ` + v4 + `}]` - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { cidr = `[{cidr: ` + v4 + `},{cidr: ` + v6 + `}]` - } else if isIPv6Supported() { + } else if isIPv6Supported(cs) { cidr = `[{cidr: ` + v6 + `}]` } return cidr @@ -2260,26 +2275,6 @@ func withNetworkAttachment(networks 
[]nadapi.NetworkSelectionElement) podOption } } -// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN -func podIPsForUserDefinedPrimaryNetwork(k8sClient clientset.Interface, podNamespace string, podName string, attachmentName string, index int) (string, error) { - pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) - if err != nil { - return "", err - } - netStatus, err := userDefinedNetworkStatus(pod, attachmentName) - if err != nil { - return "", err - } - - if len(netStatus.IPs) == 0 { - return "", fmt.Errorf("attachment for network %q without IPs", attachmentName) - } - if len(netStatus.IPs) > 2 { - return "", fmt.Errorf("attachment for network %q with more than two IPs", attachmentName) - } - return netStatus.IPs[index].IP.String(), nil -} - func podIPsForDefaultNetwork(k8sClient clientset.Interface, podNamespace string, podName string) (string, string, error) { pod, err := k8sClient.CoreV1().Pods(podNamespace).Get(context.Background(), podName, metav1.GetOptions{}) if err != nil { @@ -2289,15 +2284,6 @@ func podIPsForDefaultNetwork(k8sClient clientset.Interface, podNamespace string, return ipv4, ipv6, nil } -func userDefinedNetworkStatus(pod *v1.Pod, networkName string) (PodAnnotation, error) { - netStatus, err := unmarshalPodAnnotation(pod.Annotations, networkName) - if err != nil { - return PodAnnotation{}, fmt.Errorf("failed to unmarshall annotations for pod %q: %v", pod.Name, err) - } - - return *netStatus, nil -} - func runUDNPod(cs clientset.Interface, namespace string, serverPodConfig podConfiguration, podSpecTweak func(*v1.Pod)) *v1.Pod { By(fmt.Sprintf("instantiating the UDN pod %s", serverPodConfig.name)) podSpec := generatePodSpec(serverPodConfig) @@ -2342,15 +2328,15 @@ func connectToServerViaDefaultNetwork(clientPodConfig podConfiguration, serverIP } // assertClientExternalConnectivity checks if the client can connect to an externally created IP outside the 
cluster -func assertClientExternalConnectivity(clientPodConfig podConfiguration, externalIpv4 string, externalIpv6 string, port uint16) { - if isIPv4Supported() { +func assertClientExternalConnectivity(cs clientset.Interface, clientPodConfig podConfiguration, externalIpv4 string, externalIpv6 string, port uint16) { + if isIPv4Supported(cs) { By("asserting the *client* pod can contact the server's v4 IP located outside the cluster") Eventually(func() error { return connectToServer(clientPodConfig, externalIpv4, port) }, 2*time.Minute, 6*time.Second).Should(Succeed()) } - if isIPv6Supported() { + if isIPv6Supported(cs) { By("asserting the *client* pod can contact the server's v6 IP located outside the cluster") Eventually(func() error { return connectToServer(clientPodConfig, externalIpv6, port) @@ -2358,15 +2344,15 @@ func assertClientExternalConnectivity(clientPodConfig podConfiguration, external } } -func expectedNumberOfRoutes(netConfig networkAttachmentConfigParams) int { +func expectedNumberOfRoutes(cs clientset.Interface, netConfig networkAttachmentConfigParams) int { if netConfig.topology == "layer2" { - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { return 4 // 2 routes per family } else { return 2 //one family supported } } - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(cs) && isIPv4Supported(cs) { return 6 // 3 v4 routes + 3 v6 routes for UDN } return 3 //only one family, each has 3 routes diff --git a/test/e2e/network_segmentation_api_validations.go b/test/e2e/network_segmentation_api_validations.go index 0608485b3d..b3b29191fb 100644 --- a/test/e2e/network_segmentation_api_validations.go +++ b/test/e2e/network_segmentation_api_validations.go @@ -6,13 +6,13 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" - testdatacudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata/cudn" + 
"github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" + testscenariocudn "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario/cudn" ) var _ = Describe("Network Segmentation: API validations", func() { DescribeTable("api-server should reject invalid CRs", - func(scenarios []testdata.ValidateCRScenario) { + func(scenarios []testscenario.ValidateCRScenario) { DeferCleanup(func() { cleanupValidateCRsTest(scenarios) }) @@ -23,16 +23,16 @@ var _ = Describe("Network Segmentation: API validations", func() { Expect(stderr).To(ContainSubstring(s.ExpectedErr)) } }, - Entry("ClusterUserDefinedNetwork, mismatch topology and config", testdatacudn.MismatchTopologyConfig), - Entry("ClusterUserDefinedNetwork, localnet, invalid role", testdatacudn.LocalnetInvalidRole), - Entry("ClusterUserDefinedNetwork, localnet, invalid physicalNetworkName", testdatacudn.LocalnetInvalidPhyNetName), - Entry("ClusterUserDefinedNetwork, localnet, invalid subnets", testdatacudn.LocalnetInvalidSubnets), - Entry("ClusterUserDefinedNetwork, localnet, invalid mtu", testdatacudn.LocalnetInvalidMTU), - Entry("ClusterUserDefinedNetwork, localnet, invalid vlan", testdatacudn.LocalnetInvalidVLAN), + Entry("ClusterUserDefinedNetwork, mismatch topology and config", testscenariocudn.MismatchTopologyConfig), + Entry("ClusterUserDefinedNetwork, localnet, invalid role", testscenariocudn.LocalnetInvalidRole), + Entry("ClusterUserDefinedNetwork, localnet, invalid physicalNetworkName", testscenariocudn.LocalnetInvalidPhyNetName), + Entry("ClusterUserDefinedNetwork, localnet, invalid subnets", testscenariocudn.LocalnetInvalidSubnets), + Entry("ClusterUserDefinedNetwork, localnet, invalid mtu", testscenariocudn.LocalnetInvalidMTU), + Entry("ClusterUserDefinedNetwork, localnet, invalid vlan", testscenariocudn.LocalnetInvalidVLAN), ) DescribeTable("api-server should accept valid CRs", - func(scenarios []testdata.ValidateCRScenario) { + func(scenarios []testscenario.ValidateCRScenario) { DeferCleanup(func() { 
cleanupValidateCRsTest(scenarios) }) @@ -42,7 +42,7 @@ var _ = Describe("Network Segmentation: API validations", func() { Expect(err).NotTo(HaveOccurred(), "should create valid CR successfully") } }, - Entry("ClusterUserDefinedNetwork, localnet", testdatacudn.LocalnetValid), + Entry("ClusterUserDefinedNetwork, localnet", testscenariocudn.LocalnetValid), ) }) @@ -52,7 +52,7 @@ func runKubectlInputWithFullOutput(namespace string, data string, args ...string return e2ekubectl.NewKubectlCommand(namespace, args...).WithStdinData(data).ExecWithFullOutput() } -func cleanupValidateCRsTest(scenarios []testdata.ValidateCRScenario) { +func cleanupValidateCRsTest(scenarios []testscenario.ValidateCRScenario) { for _, s := range scenarios { e2ekubectl.RunKubectlInput("", s.Manifest, "delete", "-f", "-") } diff --git a/test/e2e/network_segmentation_endpointslices_mirror.go b/test/e2e/network_segmentation_endpointslices_mirror.go index 171073bdae..fddadb1cea 100644 --- a/test/e2e/network_segmentation_endpointslices_mirror.go +++ b/test/e2e/network_segmentation_endpointslices_mirror.go @@ -5,14 +5,14 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/util" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" discoveryv1 "k8s.io/api/discovery/v1" @@ -23,7 +23,7 @@ import ( e2eservice "k8s.io/kubernetes/test/e2e/framework/service" ) -var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { +var _ = Describe("Network Segmentation EndpointSlices mirroring", feature.NetworkSegmentation, func() { f := wrappedTestFramework("endpointslices-mirror") f.SkipNamespaceCreation = true Context("a user defined primary network", func() { @@ -60,6 +60,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { ) { By("creating the network") netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) replicas := int32(3) @@ -125,7 +126,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, false, @@ -135,7 +136,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, false, @@ -145,7 +146,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, true, @@ -155,7 +156,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { 
networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, true, @@ -195,6 +196,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { Expect(err).NotTo(HaveOccurred()) By("creating the network") netConfig.namespace = defaultNetNamespace.Name + netConfig.cidr = filterCIDRsAndJoin(f.ClientSet, netConfig.cidr) Expect(createNetworkFn(netConfig)).To(Succeed()) replicas := int32(3) @@ -233,7 +235,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "secondary", }, ), @@ -242,7 +244,7 @@ var _ = Describe("Network Segmentation EndpointSlices mirroring", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "secondary", }, ), diff --git a/test/e2e/network_segmentation_localnet.go b/test/e2e/network_segmentation_localnet.go index a6b68db97c..3acd6b1c20 100644 --- a/test/e2e/network_segmentation_localnet.go +++ b/test/e2e/network_segmentation_localnet.go @@ -9,6 +9,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" + infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -21,20 +23,26 @@ import ( ) var _ = Describe("Network Segmentation: Localnet", func() { - f := wrappedTestFramework("network-segmentation-localnet") + var ( + f = wrappedTestFramework("network-segmentation-localnet") + providerCtx infraapi.Context + ) f.SkipNamespaceCreation = true + BeforeEach(func() { + providerCtx = infraprovider.Get().NewTestContext() + }) + It("using ClusterUserDefinedNetwork CR, pods in different namespaces, should communicate over localnet topology", func() { const ( - vlan = 200 - testPort = 9000 - subnetIPv4 = "192.168.100.0/24" - subnetIPv6 = "2001:dbb::/64" - excludeSubnetIPv4 = "192.168.100.0/29" - excludeSubnetIPv6 = "2001:dbb::/120" - secondaryIfaceName = "eth1" - ovsBrName = "ovsbr-eth1" + vlan = 200 + testPort = 9000 + subnetIPv4 = "192.168.100.0/24" + subnetIPv6 = "2001:dbb::/64" + excludeSubnetIPv4 = "192.168.100.0/29" + excludeSubnetIPv6 = "2001:dbb::/120" ) + ovsBrName := "ovsbr-udn" // use unique names to avoid conflicts with tests running in parallel nsBlue := uniqueMetaName("blue") nsRed := uniqueMetaName("red") @@ -42,14 +50,12 @@ var _ = Describe("Network Segmentation: Localnet", func() { physicalNetworkName := uniqueMetaName("localnet1") By("setup the localnet underlay") - ovsPods := ovsPods(f.ClientSet) - Expect(ovsPods).NotTo(BeEmpty()) - DeferCleanup(func() { - By("teardown the localnet underlay") - Expect(teardownUnderlay(ovsPods, ovsBrName)).To(Succeed()) - }) c := networkAttachmentConfig{networkAttachmentConfigParams: networkAttachmentConfigParams{networkName: physicalNetworkName, vlanID: vlan}} - Expect(setupUnderlay(ovsPods, ovsBrName, secondaryIfaceName, c.networkName, c.vlanID)).To(Succeed()) + Expect(providerCtx.SetupUnderlay(f, infraapi.Underlay{ + BridgeName: ovsBrName, + 
LogicalNetworkName: c.networkName, + VlanID: c.vlanID, + })).To(Succeed()) By("create test namespaces") _, err := f.ClientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: nsRed}}, metav1.CreateOptions{}) @@ -67,9 +73,10 @@ var _ = Describe("Network Segmentation: Localnet", func() { name: cudnName, physicalNetworkName: physicalNetworkName, vlanID: vlan, - cidr: correctCIDRFamily(subnetIPv4, subnetIPv6), - excludeCIDRs: selectCIDRs(excludeSubnetIPv4, excludeSubnetIPv6), + cidr: filterCIDRsAndJoin(f.ClientSet, joinCIDRs(subnetIPv4, subnetIPv6)), + excludeCIDRs: filterCIDRs(f.ClientSet, excludeSubnetIPv4, excludeSubnetIPv6), } + cudnYAML := newLocalnetCUDNYaml(netConf, nsBlue, nsRed) cleanup, err := createManifest("", cudnYAML) Expect(err).NotTo(HaveOccurred()) diff --git a/test/e2e/network_segmentation_policy.go b/test/e2e/network_segmentation_policy.go index 10e2b0f0e7..2b71ebea5c 100644 --- a/test/e2e/network_segmentation_policy.go +++ b/test/e2e/network_segmentation_policy.go @@ -9,6 +9,7 @@ import ( nadclient "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" knet "k8s.io/api/networking/v1" @@ -18,7 +19,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" ) -var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { +var _ = ginkgo.Describe("Network Segmentation: Network Policies", feature.NetworkSegmentation, func() { f := wrappedTestFramework("network-segmentation") f.SkipNamespaceCreation = true @@ -34,6 +35,8 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { randomStringLength = 5 nameSpaceYellowSuffix = "yellow" namespaceBlueSuffix = "blue" + namespaceRedSuffix = "red" + namespaceOrangeSuffix = "orange" ) var ( @@ -56,7 +59,10 @@ var _ = 
ginkgo.Describe("Network Segmentation: Network Policies", func() { namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix) namespaceBlue := getNamespaceName(f, namespaceBlueSuffix) - for _, namespace := range []string{namespaceYellow, namespaceBlue} { + namespaceRed := getNamespaceName(f, namespaceRedSuffix) + namespaceOrange := getNamespaceName(f, namespaceOrangeSuffix) + for _, namespace := range []string{namespaceYellow, namespaceBlue, + namespaceRed, namespaceOrange} { ginkgo.By("Creating namespace " + namespace) ns, err := cs.CoreV1().Namespaces().Create(context.Background(), &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -79,6 +85,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { ginkgo.By("Creating the attachment configuration") netConfig := newNetworkAttachmentConfig(netConfigParams) netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(cs, netConfig.cidr) _, err := nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), generateNAD(netConfig), @@ -96,7 +103,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { for i, cidr := range strings.Split(netConfig.cidr, ",") { if cidr != "" { ginkgo.By("asserting the server pod has an IP from the configured range") - serverIP, err = podIPsForUserDefinedPrimaryNetwork( + serverIP, err = getPodAnnotationIPsForAttachmentByIndex( cs, f.Namespace.Name, serverPodConfig.name, @@ -131,7 +138,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -151,7 +158,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: 
correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, *podConfig( @@ -179,11 +186,13 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { namespaceYellow := getNamespaceName(f, nameSpaceYellowSuffix) namespaceBlue := getNamespaceName(f, namespaceBlueSuffix) + namespaceRed := getNamespaceName(f, namespaceRedSuffix) + namespaceOrange := getNamespaceName(f, namespaceOrangeSuffix) nad := networkAttachmentConfigParams{ topology: topology, - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), - // Both yellow and blue namespaces are going to served by green network. + cidr: filterCIDRsAndJoin(cs, joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet)), + // The yellow, blue and red namespaces are going to served by green network. // Use random suffix for the network name to avoid race between tests. 
networkName: fmt.Sprintf("%s-%s", "green", rand.String(randomStringLength)), role: "primary", @@ -194,6 +203,7 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { for _, namespace := range []string{namespaceYellow, namespaceBlue} { ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace) netConfig := newNetworkAttachmentConfig(nad) + netConfig.cidr = filterCIDRsAndJoin(cs, netConfig.cidr) netConfig.namespace = namespace netConfig.name = netConfName @@ -221,12 +231,12 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { } subnet, err := getNetCIDRSubnet(cidr) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - allowServerPodIP, err = podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, allowServerPodConfig.name, + allowServerPodIP, err = getPodAnnotationIPsForAttachmentByIndex(cs, namespaceYellow, allowServerPodConfig.name, namespacedName(namespaceYellow, netConfName), i) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("asserting the allow server pod IP %v is from the configured range %v", allowServerPodIP, cidr)) gomega.Expect(inRange(subnet, allowServerPodIP)).To(gomega.Succeed()) - denyServerPodIP, err = podIPsForUserDefinedPrimaryNetwork(cs, namespaceYellow, denyServerPodConfig.name, + denyServerPodIP, err = getPodAnnotationIPsForAttachmentByIndex(cs, namespaceYellow, denyServerPodConfig.name, namespacedName(namespaceYellow, netConfName), i) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("asserting the deny server pod IP %v is from the configured range %v", denyServerPodIP, cidr)) @@ -257,8 +267,8 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) - ginkgo.By("creating a \"allow-traffic-to-pod\" network policy") - _, err = 
allowTrafficToPodFromNamespacePolicy(f, namespaceYellow, namespaceBlue, "allow-traffic-to-pod", allowServerPodLabel) + ginkgo.By("creating a \"allow-traffic-to-pod\" network policy for blue and red namespace") + _, err = allowTrafficToPodFromNamespacePolicy(f, namespaceYellow, namespaceBlue, namespaceRed, "allow-traffic-to-pod", allowServerPodLabel) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("asserting the *client* pod can contact the allow server pod exposed endpoint") @@ -271,6 +281,74 @@ var _ = ginkgo.Describe("Network Segmentation: Network Policies", func() { return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) + // Create client pod in red namespace and check network policy is working. + ginkgo.By("creating client pod in red namespace and check if it is in pending state until NAD is created") + clientPodConfig.namespace = namespaceRed + podSpec := generatePodSpec(clientPodConfig) + _, err = cs.CoreV1().Pods(clientPodConfig.namespace).Create( + context.Background(), + podSpec, + metav1.CreateOptions{}, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Consistently(func() v1.PodPhase { + updatedPod, err := cs.CoreV1().Pods(clientPodConfig.namespace).Get(context.Background(), + clientPodConfig.name, metav1.GetOptions{}) + if err != nil { + return v1.PodFailed + } + return updatedPod.Status.Phase + }, 1*time.Minute, 6*time.Second).Should(gomega.Equal(v1.PodPending)) + + // The pod won't run and the namespace address set won't be created until the NAD for the network is added + // to the namespace and we test here that once that happens the policy is reconciled to account for it. 
+ ginkgo.By("creating NAD for red and orange namespaces and check pod moves into running state") + for _, namespace := range []string{namespaceRed, namespaceOrange} { + ginkgo.By("creating the attachment configuration for " + netConfName + " in namespace " + namespace) + netConfig := newNetworkAttachmentConfig(nad) + netConfig.namespace = namespace + netConfig.name = netConfName + + _, err := nadClient.NetworkAttachmentDefinitions(namespace).Create( + context.Background(), + generateNAD(netConfig), + metav1.CreateOptions{}, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + gomega.Eventually(func() v1.PodPhase { + updatedPod, err := cs.CoreV1().Pods(clientPodConfig.namespace).Get(context.Background(), + clientPodConfig.name, metav1.GetOptions{}) + if err != nil { + return v1.PodFailed + } + return updatedPod.Status.Phase + }, 1*time.Minute, 6*time.Second).Should(gomega.Equal(v1.PodRunning)) + + ginkgo.By("asserting the *red client* pod can contact the allow server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, allowServerPodConfig, clientPodConfig, allowServerPodIP, port) + }, 1*time.Minute, 6*time.Second).Should(gomega.Succeed()) + + ginkgo.By("asserting the *red client* pod can not contact deny server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) + }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) + + // Create client pod in orange namespace now and check network policy is working. 
+ ginkgo.By("creating client pod in orange namespace") + clientPodConfig.namespace = namespaceOrange + runUDNPod(cs, namespaceOrange, clientPodConfig, nil) + + ginkgo.By("asserting the *orange client* pod can not contact the allow server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, allowServerPodConfig, clientPodConfig, allowServerPodIP, port) + }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) + + ginkgo.By("asserting the *orange client* pod can not contact deny server pod exposed endpoint") + gomega.Eventually(func() error { + return reachServerPodFromClient(cs, denyServerPodConfig, clientPodConfig, denyServerPodIP, port) + }, 1*time.Minute, 6*time.Second).ShouldNot(gomega.Succeed()) }, ginkgo.Entry( "in L2 primary UDN", @@ -327,7 +405,7 @@ func getNamespaceName(f *framework.Framework, nsSuffix string) string { return fmt.Sprintf("%s-%s", f.Namespace.Name, nsSuffix) } -func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fromNamespace, policyName string, podLabel map[string]string) (*knet.NetworkPolicy, error) { +func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fromNamespace1, fromNamespace2, policyName string, podLabel map[string]string) (*knet.NetworkPolicy, error) { policy := &knet.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: policyName, @@ -336,7 +414,8 @@ func allowTrafficToPodFromNamespacePolicy(f *framework.Framework, namespace, fro PodSelector: metav1.LabelSelector{MatchLabels: podLabel}, PolicyTypes: []knet.PolicyType{knet.PolicyTypeIngress}, Ingress: []knet.NetworkPolicyIngressRule{{From: []knet.NetworkPolicyPeer{ - {NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace}}}}}}, + {NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/metadata.name": fromNamespace1}}}, + {NamespaceSelector: &metav1.LabelSelector{MatchLabels: 
map[string]string{"kubernetes.io/metadata.name": fromNamespace2}}}}}}, }, } return f.ClientSet.NetworkingV1().NetworkPolicies(namespace).Create(context.TODO(), policy, metav1.CreateOptions{}) diff --git a/test/e2e/network_segmentation_services.go b/test/e2e/network_segmentation_services.go index d580bc190f..8d2678c178 100644 --- a/test/e2e/network_segmentation_services.go +++ b/test/e2e/network_segmentation_services.go @@ -12,6 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -30,7 +31,7 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = Describe("Network Segmentation: services", func() { +var _ = Describe("Network Segmentation: services", feature.NetworkSegmentation, func() { f := wrappedTestFramework("udn-services") f.SkipNamespaceCreation = true @@ -112,6 +113,7 @@ var _ = Describe("Network Segmentation: services", func() { By("Creating the attachment configuration") netConfig := newNetworkAttachmentConfig(netConfigParams) netConfig.namespace = f.Namespace.Name + netConfig.cidr = filterCIDRsAndJoin(cs, netConfig.cidr) _, err = nadClient.NetworkAttachmentDefinitions(f.Namespace.Name).Create( context.Background(), generateNAD(netConfig), @@ -267,7 +269,7 @@ ips=$(ip -o addr show dev $iface| grep global |awk '{print $4}' | cut -d/ -f1 | networkAttachmentConfigParams{ name: nadName, topology: "layer3", - cidr: correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), @@ -276,7 +278,7 @@ ips=$(ip -o addr show dev $iface| grep global |awk '{print $4}' | cut -d/ -f1 | networkAttachmentConfigParams{ name: nadName, topology: "layer2", - cidr: 
correctCIDRFamily(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), + cidr: joinCIDRs(userDefinedNetworkIPv4Subnet, userDefinedNetworkIPv6Subnet), role: "primary", }, ), diff --git a/test/e2e/network_segmentation_utils.go b/test/e2e/network_segmentation_utils.go new file mode 100644 index 0000000000..960b6889c7 --- /dev/null +++ b/test/e2e/network_segmentation_utils.go @@ -0,0 +1,22 @@ +package e2e + +import ( + "k8s.io/client-go/kubernetes" + "k8s.io/utils/net" +) + +// podIPsForUserDefinedPrimaryNetwork returns the v4 or v6 IPs for a pod on the UDN +func getPodAnnotationIPsForPrimaryNetworkByIPFamily(k8sClient kubernetes.Interface, podNamespace string, podName string, networkName string, family net.IPFamily) (string, error) { + if networkName != "default" { + networkName = namespacedName(podNamespace, networkName) + } + ipnets, err := getPodAnnotationIPsForAttachment(k8sClient, podNamespace, podName, networkName) + if err != nil { + return "", err + } + ipnet := getFirstCIDROfFamily(family, ipnets) + if ipnet == nil { + return "", nil + } + return ipnet.IP.String(), nil +} diff --git a/test/e2e/node_ip_mac_migration.go b/test/e2e/node_ip_mac_migration.go index d84ce6d737..19626e50e6 100644 --- a/test/e2e/node_ip_mac_migration.go +++ b/test/e2e/node_ip_mac_migration.go @@ -18,6 +18,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -35,7 +36,7 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = Describe("Node IP and MAC address migration", func() { +var _ = Describe("Node IP and MAC address migration", feature.NodeIPMACMigration, func() { const ( namespacePrefix = "node-ip-migration" podWorkerNodeName = "primary" @@ -131,7 +132,7 @@ spec: framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container") externalContainerIPs[4], externalContainerIPs[6] = externalContainer.GetIPv4(), externalContainer.GetIPv6() @@ -453,7 +454,7 @@ spec: Expect(pods.Items).To(HaveLen(1)) ovnkPod = pods.Items[0] - cmd := "ovs-ofctl dump-flows breth0 table=0" + cmd := fmt.Sprintf("ovs-ofctl dump-flows %s table=0", deploymentconfig.Get().ExternalBridgeName()) err = wait.PollImmediate(framework.Poll, 30*time.Second, func() (bool, error) { stdout, err := e2epodoutput.RunHostCmdWithRetries(ovnkPod.Namespace, ovnkPod.Name, cmd, framework.Poll, 30*time.Second) if err != nil { @@ -514,7 +515,7 @@ spec: time.Sleep(time.Duration(settleTimeout) * time.Second) By(fmt.Sprintf("Checking nodeport flows have been updated to use new IP: %s", migrationWorkerNodeIP)) - cmd := 
"ovs-ofctl dump-flows breth0 table=0" + cmd := fmt.Sprintf("ovs-ofctl dump-flows %s table=0", deploymentconfig.Get().ExternalBridgeName()) err = wait.PollImmediate(framework.Poll, 30*time.Second, func() (bool, error) { stdout, err := e2epodoutput.RunHostCmdWithRetries(ovnkPod.Namespace, ovnkPod.Name, cmd, framework.Poll, 30*time.Second) if err != nil { @@ -627,7 +628,7 @@ func checkFlowsForMACPeriodically(ovnkPod v1.Pod, addr net.HardwareAddr, duratio } func checkFlowsForMAC(ovnkPod v1.Pod, mac net.HardwareAddr) error { - cmd := "ovs-ofctl dump-flows breth0" + cmd := fmt.Sprintf("ovs-ofctl dump-flows %s", deploymentconfig.Get().ExternalBridgeName()) flowOutput := e2epodoutput.RunHostCmdOrDie(ovnkPod.Namespace, ovnkPod.Name, cmd) lines := strings.Split(flowOutput, "\n") for _, line := range lines { @@ -952,26 +953,31 @@ func migrateWorkerNodeIP(nodeName, fromIP, targetIP string, invertOrder bool) (e // Define a function to change the IP address for later use. changeIPAddress := func() error { - // Add new IP first - this will preserve the default route. newIPMask := targetIP + "/" + mask - framework.Logf("Adding new IP address %s to node %s", newIPMask, nodeName) - // Add cleanup command. - cleanupCmd := []string{"ip", "address", "del", newIPMask, "dev", iface} + + // Delete current IP address. If you add a second ip from the same subnet to an interface, it will + // be considered a secondary IP address and will be deleted together with the primary (aka old) IP. + framework.Logf("Deleting current IP address %s from node %s", parsedNetIPMask.String(), nodeName) + // Add cleanup command to add original IP back to the end of the cleanupCommands list. + // This way, we preserve first delete then add new IP sequence. + cleanupCmd := []string{"ip", "address", "add", parsedNetIPMask.String(), "dev", iface} cleanupCommands = append(cleanupCommands, cleanupCmd) // Run command. 
- _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "add", newIPMask, "dev", iface}) + _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "del", parsedNetIPMask.String(), "dev", iface}) if err != nil { - return fmt.Errorf("failed to add new IP %s to interface %s on node %s: %v", newIPMask, iface, nodeName, err) + return err } - // Delete current IP address. On rollback, first add the old IP and then delete the new one. - framework.Logf("Deleting current IP address %s from node %s", parsedNetIPMask.String(), nodeName) - // Add cleanup command. - cleanupCmd = []string{"ip", "address", "add", parsedNetIPMask.String(), "dev", iface} + + // Now add new IP. + framework.Logf("Adding new IP address %s to node %s", newIPMask, nodeName) + // Add cleanup command to remove the new IP address to the beginning of the cleanupCommands list. + cleanupCmd = []string{"ip", "address", "del", newIPMask, "dev", iface} cleanupCommands = append([][]string{cleanupCmd}, cleanupCommands...) + // Run command. 
- _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "del", parsedNetIPMask.String(), "dev", iface}) + _, err = infraprovider.Get().ExecK8NodeCommand(nodeName, []string{"ip", "address", "add", newIPMask, "dev", iface}) if err != nil { - return err + return fmt.Errorf("failed to add new IP %s to interface %s on node %s: %v", newIPMask, iface, nodeName, err) } return nil } diff --git a/test/e2e/ovspinning.go b/test/e2e/ovspinning.go index af72285ead..f3d94b530b 100644 --- a/test/e2e/ovspinning.go +++ b/test/e2e/ovspinning.go @@ -7,13 +7,14 @@ import ( "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) -var _ = ginkgo.Describe("OVS CPU affinity pinning", func() { +var _ = ginkgo.Describe("OVS CPU affinity pinning", feature.OVSCPUPin, func() { f := wrappedTestFramework("ovspinning") diff --git a/test/e2e/pod.go b/test/e2e/pod.go index f5b7b12aae..c9a5e5efb7 100644 --- a/test/e2e/pod.go +++ b/test/e2e/pod.go @@ -105,15 +105,15 @@ var _ = ginkgo.Describe("Pod to external server PMTUD", func() { providerPrimaryNetwork, err := infraprovider.Get().PrimaryNetwork() framework.ExpectNoError(err, "failed to get provider primary network") externalContainer = infraapi.ExternalContainer{Name: externalContainerName, Image: images.AgnHost(), Network: providerPrimaryNetwork, - Args: []string{"netexec", "--http-port", fmt.Sprintf("%d", externalContainerPort), "--udp-port", fmt.Sprintf("%d", externalContainerPort)}, + CmdArgs: []string{"netexec", "--http-port", fmt.Sprintf("%d", externalContainerPort), "--udp-port", fmt.Sprintf("%d", externalContainerPort)}, ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create 
external container (%s)", externalContainer) - if isIPv4Supported() { + if isIPv4Supported(f.ClientSet) { gomega.Expect(externalContainer.GetIPv4()).ToNot(gomega.BeEmpty()) externalContainerIPs = append(externalContainerIPs, externalContainer.GetIPv4()) } - if isIPv6Supported() { + if isIPv6Supported(f.ClientSet) { gomega.Expect(externalContainer.GetIPv6()).ToNot(gomega.BeEmpty()) externalContainerIPs = append(externalContainerIPs, fmt.Sprintf("[%s]", externalContainer.GetIPv6())) } @@ -155,7 +155,7 @@ var _ = ginkgo.Describe("Pod to external server PMTUD", func() { primaryInf, err := infraprovider.Get().GetK8NodeNetworkInterface(clientPodNodeName, providerPrimaryNetwork) framework.ExpectNoError(err, "failed to get provider primary network interface info") clientnodeIP := primaryInf.IPv4 - if IsIPv6Cluster(f.ClientSet) && isIPv6Supported() { + if IsIPv6Cluster(f.ClientSet) && isIPv6Supported(f.ClientSet) { clientnodeIP = fmt.Sprintf("[%s]", primaryInf.IPv6) } gomega.Expect(clientnodeIP).NotTo(gomega.BeEmpty()) diff --git a/test/e2e/route_advertisements.go b/test/e2e/route_advertisements.go index bee77d639f..36c0c5c950 100644 --- a/test/e2e/route_advertisements.go +++ b/test/e2e/route_advertisements.go @@ -2,9 +2,14 @@ package e2e import ( "context" + "embed" "fmt" + "math/rand" "net" + "os" + "path/filepath" "strings" + "text/template" "time" @@ -15,13 +20,23 @@ import ( apitypes "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/types" udnv1 "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1" udnclientset "github.com/ovn-org/ovn-kubernetes/go-controller/pkg/crd/userdefinednetwork/v1/apis/clientset/versioned" + "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" + "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" + 
"github.com/ovn-org/ovn-kubernetes/test/e2e/label" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -32,13 +47,14 @@ import ( utilnet "k8s.io/utils/net" ) -var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", func() { - const ( - serverContainerName = "bgpserver" - routerContainerName = "frr" - echoClientPodName = "echo-client-pod" - bgpExternalNetworkName = "bgpnet" - ) +const ( + serverContainerName = "bgpserver" + routerContainerName = "frr" + echoClientPodName = "echo-client-pod" + bgpExternalNetworkName = "bgpnet" +) + +var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is advertised", feature.RouteAdvertisements, func() { var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -51,10 +67,10 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is bgpServer := infraapi.ExternalContainer{Name: serverContainerName} networkInterface, err := infraprovider.Get().GetExternalContainerNetworkInterface(bgpServer, bgpNetwork) framework.ExpectNoError(err, "container %s attached to network %s must contain network info", serverContainerName, bgpExternalNetworkName) - if isIPv4Supported() && len(networkInterface.IPv4) > 0 { + if isIPv4Supported(f.ClientSet) && len(networkInterface.IPv4) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv4) } - if isIPv6Supported() && len(networkInterface.IPv6) > 0 { + if 
isIPv6Supported(f.ClientSet) && len(networkInterface.IPv6) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv6) } framework.Logf("The external server IPs are: %+v", serverContainerIPs) @@ -217,7 +233,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) expectedPodIP := podv4IP - if isIPv6Supported() && utilnet.IsIPv6String(serverContainerIP) { + if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { expectedPodIP = podv6IP // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") @@ -233,14 +249,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when default podNetwork is }) }) -var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advertised", func() { - const ( - serverContainerName = "bgpserver" - routerContainerName = "frr" - echoClientPodName = "echo-client-pod" - bgpExternalNetworkName = "bgpnet" - placeholder = "PLACEHOLDER_NAMESPACE" - ) +var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advertised", feature.RouteAdvertisements, func() { var serverContainerIPs []string var frrContainerIPv4, frrContainerIPv6 string var nodes *corev1.NodeList @@ -265,10 +274,10 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert bgpServer := infraapi.ExternalContainer{Name: serverContainerName} networkInterface, err := infraprovider.Get().GetExternalContainerNetworkInterface(bgpServer, bgpNetwork) framework.ExpectNoError(err, "container %s attached to network %s must contain network info", serverContainerName, bgpExternalNetworkName) - if isIPv4Supported() && len(networkInterface.IPv4) > 0 { + if isIPv4Supported(f.ClientSet) && len(networkInterface.IPv4) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv4) } - if 
isIPv6Supported() && len(networkInterface.IPv6) > 0 { + if isIPv6Supported(f.ClientSet) && len(networkInterface.IPv6) > 0 { serverContainerIPs = append(serverContainerIPs, networkInterface.IPv6) } gomega.Expect(len(serverContainerIPs)).Should(gomega.BeNumerically(">", 0), "failed to find external container IPs") @@ -297,15 +306,16 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Values: []string{f.Namespace.Name}, }}} - if IsGatewayModeLocal() && cudnTemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { - e2eskipper.Skipf( - "BGP for L2 networks on LGW is currently unsupported", - ) - } // Create CUDN ginkgo.By("create ClusterUserDefinedNetwork") udnClient, err := udnclientset.NewForConfig(f.ClientConfig()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if cudnTemplate.Spec.Network.Layer3 != nil { + cudnTemplate.Spec.Network.Layer3.Subnets = filterL3Subnets(f.ClientSet, cudnTemplate.Spec.Network.Layer3.Subnets) + } + if cudnTemplate.Spec.Network.Layer2 != nil { + cudnTemplate.Spec.Network.Layer2.Subnets = filterDualStackCIDRs(f.ClientSet, cudnTemplate.Spec.Network.Layer2.Subnets) + } cUDN, err := udnClient.K8sV1().ClusterUserDefinedNetworks().Create(context.Background(), cudnTemplate, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.DeferCleanup(func() { @@ -398,7 +408,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert ginkgo.By("queries to the external server are not SNATed (uses podIP)") for _, serverContainerIP := range serverContainerIPs { - podIP, err := podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 0) + podIP, err := getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 0) gomega.Expect(err).NotTo(gomega.HaveOccurred()) framework.ExpectNoError(err, fmt.Sprintf("Getting podIPs for pod %s 
failed: %v", clientPod.Name, err)) framework.Logf("Client pod IP address=%s", podIP) @@ -416,8 +426,8 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert framework.Poll, 60*time.Second) framework.ExpectNoError(err, fmt.Sprintf("Testing pod to external traffic failed: %v", err)) - if isIPv6Supported() && utilnet.IsIPv6String(serverContainerIP) { - podIP, err = podIPsForUserDefinedPrimaryNetwork(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) + if isIPv6Supported(f.ClientSet) && utilnet.IsIPv6String(serverContainerIP) { + podIP, err = getPodAnnotationIPsForAttachmentByIndex(f.ClientSet, f.Namespace.Name, clientPod.Name, namespacedName(f.Namespace.Name, cUDN.Name), 1) // For IPv6 addresses, need to handle the brackets in the output outputIP := strings.TrimPrefix(strings.Split(stdout, "]:")[0], "[") gomega.Expect(outputIP).To(gomega.Equal(podIP), @@ -440,13 +450,13 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Topology: udnv1.NetworkTopologyLayer3, Layer3: &udnv1.Layer3Config{ Role: "Primary", - Subnets: generateL3Subnets(udnv1.Layer3Subnet{ + Subnets: []udnv1.Layer3Subnet{{ CIDR: "103.103.0.0/16", HostSubnet: 24, - }, udnv1.Layer3Subnet{ + }, { CIDR: "2014:100:200::0/60", HostSubnet: 64, - }), + }}, }, }, }, @@ -485,7 +495,7 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{ Role: "Primary", - Subnets: generateL2Subnets("103.0.0.0/16", "2014:100::0/60"), + Subnets: udnv1.DualStackCIDRs{"103.0.0.0/16", "2014:100::0/60"}, }, }, }, @@ -516,9 +526,13 @@ var _ = ginkgo.Describe("BGP: Pod to external server when CUDN network is advert ) }) -var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", +var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks", feature.RouteAdvertisements, func(cudnATemplate, 
cudnBTemplate *udnv1.ClusterUserDefinedNetwork) { const curlConnectionTimeoutCode = "28" + const ( + ipFamilyV4 = iota + ipFamilyV6 + ) f := wrappedTestFramework("bpp-network-isolation") f.SkipNamespaceCreation = true @@ -532,11 +546,8 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" var svcNetA, svcNetB, svcNetDefault *corev1.Service var cudnA, cudnB *udnv1.ClusterUserDefinedNetwork var ra *rav1.RouteAdvertisements - + var hostNetworkPort int ginkgo.BeforeEach(func() { - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { - e2eskipper.Skipf("Advertising Layer2 UDNs is not currently supported in LGW") - } ginkgo.By("Configuring primary UDN namespaces") var err error udnNamespaceA, err = f.CreateNamespace(context.TODO(), f.BaseName, map[string]string{ @@ -570,6 +581,19 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" udnClient, err := udnclientset.NewForConfig(f.ClientConfig()) gomega.Expect(err).NotTo(gomega.HaveOccurred()) + if cudnATemplate.Spec.Network.Layer3 != nil { + cudnATemplate.Spec.Network.Layer3.Subnets = filterL3Subnets(f.ClientSet, cudnATemplate.Spec.Network.Layer3.Subnets) + } + if cudnATemplate.Spec.Network.Layer2 != nil { + cudnATemplate.Spec.Network.Layer2.Subnets = filterDualStackCIDRs(f.ClientSet, cudnATemplate.Spec.Network.Layer2.Subnets) + } + if cudnBTemplate.Spec.Network.Layer3 != nil { + cudnBTemplate.Spec.Network.Layer3.Subnets = filterL3Subnets(f.ClientSet, cudnBTemplate.Spec.Network.Layer3.Subnets) + } + if cudnBTemplate.Spec.Network.Layer2 != nil { + cudnBTemplate.Spec.Network.Layer2.Subnets = filterDualStackCIDRs(f.ClientSet, cudnBTemplate.Spec.Network.Layer2.Subnets) + } + cudnA, err = udnClient.K8sV1().ClusterUserDefinedNetworks().Create(context.Background(), cudnATemplate, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -584,6 +608,30 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation 
between advertised networks" nodes, err = e2enode.GetReadySchedulableNodes(context.TODO(), f.ClientSet) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">", 2)) + // create host networked pod + ginkgo.By("Creating host network pods on each node") + // get random port in case the test retries and port is already in use on host node + min := 25000 + max := 25999 + hostNetworkPort = rand.Intn(max-min+1) + min + framework.Logf("Random host networked port chosen: %d", hostNetworkPort) + for _, node := range nodes.Items { + // this creates a udp / http netexec listener which is able to receive the "hostname" + // command. We use this to validate that each endpoint is received at least once + args := []string{ + "netexec", + fmt.Sprintf("--http-port=%d", hostNetworkPort), + fmt.Sprintf("--udp-port=%d", hostNetworkPort), + } + + // create host networked Pods + _, err := createPod(f, node.Name+"-hostnet-ep", node.Name, f.Namespace.Name, []string{}, map[string]string{}, func(p *corev1.Pod) { + p.Spec.Containers[0].Args = args + p.Spec.HostNetwork = true + }) + + framework.ExpectNoError(err) + } ginkgo.By("Setting up pods and services") podsNetA = []*corev1.Pod{} @@ -603,6 +651,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" svc.Spec.Ports = []corev1.ServicePort{{Port: 8080}} familyPolicy := corev1.IPFamilyPolicyPreferDualStack svc.Spec.IPFamilyPolicy = &familyPolicy + svc.Spec.Type = corev1.ServiceTypeNodePort svcNetA, err = f.ClientSet.CoreV1().Services(pod.Namespace).Create(context.Background(), svc, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -626,6 +675,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" svc.Name = fmt.Sprintf("service-default") svc.Namespace = "default" svc.Spec.Selector = pod.Labels + svc.Spec.Type = corev1.ServiceTypeNodePort svcNetDefault, err = 
f.ClientSet.CoreV1().Services(pod.Namespace).Create(context.Background(), svc, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -674,9 +724,6 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }) ginkgo.AfterEach(func() { - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 && isLocalGWModeEnabled() { - return - } gomega.Expect(f.ClientSet.CoreV1().Pods(udnNamespaceA.Name).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{})).To(gomega.Succeed()) gomega.Expect(f.ClientSet.CoreV1().Pods(udnNamespaceB.Name).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{})).To(gomega.Succeed()) @@ -708,6 +755,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" } if svcNetDefault != nil { err = f.ClientSet.CoreV1().Services(svcNetDefault.Namespace).Delete(context.Background(), svcNetDefault.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) svcNetDefault = nil } @@ -753,7 +801,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" framework.Logf("Connectivity check successful:'%s' -> %s", client, targetAddress) return out, nil } - clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(0) + clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(ipFamilyV4) asyncAssertion := gomega.Eventually timeout := time.Second * 30 @@ -772,9 +820,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" return fmt.Errorf("expected connectivity check to contain %q, got %q", expectedOutput, out) } } - if isIPv6Supported() && isIPv4Supported() { + if isIPv6Supported(f.ClientSet) && isIPv4Supported(f.ClientSet) { // use ipFamilyIndex of 1 to pick the IPv6 addresses - clientName, clientNamespace, dst, expectedOutput, expectErr := connInfo(1) + clientName, clientNamespace, dst, expectedOutput, expectErr := 
connInfo(ipFamilyV6) out, err := checkConnectivity(clientName, clientNamespace, dst) if expectErr != (err != nil) { return fmt.Errorf("expected connectivity check to return error(%t), got %v, output %v", expectErr, err, out) @@ -794,9 +842,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podsNetA[1] - clientPodStatus, err := userDefinedNetworkStatus(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) + clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false }), @@ -806,9 +854,9 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podsNetA[2] - clientPodStatus, err := userDefinedNetworkStatus(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) + clientPodStatus, err := getPodAnnotationForAttachment(clientPod, namespacedName(clientPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", clientPodStatus.IPs[ipFamilyIndex].IP.String(), false }), @@ -818,7 +866,7 @@ var _ = 
ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[2] srvPod := podNetB - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -829,7 +877,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podsNetA[0] srvPod := podNetB - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -839,7 +887,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podNetDefault srvPod := podNetB - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnBTemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -849,7 +897,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientPod := podNetDefault srvPod := podsNetA[0] - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, 
namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientPod.Name, clientPod.Namespace, net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -863,16 +911,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" }), ginkgo.Entry("pod in the UDN should not be able to access a default network service", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { - err := true - out := curlConnectionTimeoutCode - if cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { - // FIXME: prevent looping of traffic in L2 UDNs - // bad behaviour: packet is looping from management port -> breth0 -> GR -> management port -> breth0 and so on - // which is a never ending loop - // this causes curl timeout with code 7 host unreachable instead of code 28 - out = "" - } - return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetDefault.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", out, err + return podsNetA[0].Name, podsNetA[0].Namespace, net.JoinHostPort(svcNetDefault.Spec.ClusterIPs[ipFamilyIndex], "8080") + "/clientip", curlConnectionTimeoutCode, true }), ginkgo.Entry("pod in the UDN should be able to access kapi in default network service", func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { @@ -887,7 +926,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientNode := podsNetA[0].Spec.NodeName srvPod := podsNetA[0] - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientNode, "", 
net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), @@ -897,10 +936,154 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" clientNode := podsNetA[2].Spec.NodeName srvPod := podsNetA[0] - srvPodStatus, err := userDefinedNetworkStatus(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) + srvPodStatus, err := getPodAnnotationForAttachment(srvPod, namespacedName(srvPod.Namespace, cudnATemplate.Name)) framework.ExpectNoError(err) return clientNode, "", net.JoinHostPort(srvPodStatus.IPs[ipFamilyIndex].IP.String(), "8080") + "/clientip", curlConnectionTimeoutCode, true }), + ginkgo.Entry("UDN pod to local node should not work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), clientPod.Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + // FIXME: add the host process socket to the VRF for this test to work. + // This scenario is something that is not supported yet. So the test will continue to fail. + // This works the same on both normal UDNs and advertised UDNs. + // So because the process is not bound to the VRF, packet reaches the host but kernel sends a RESET. So its not code 28 but code7. 
+ // 10:59:55.351067 319594f193d4d_3 P ifindex 191 0a:58:5d:5d:01:05 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 64, id 57264, + // offset 0, flags [DF], proto TCP (6), length 60) + // 93.93.1.5.36363 > 172.18.0.2.25022: Flags [S], cksum 0x0aa5 (incorrect -> 0xe0b7), seq 3879759281, win 65280, + // options [mss 1360,sackOK,TS val 3006752321 ecr 0,nop,wscale 7], length 0 + // 10:59:55.352404 ovn-k8s-mp87 In ifindex 186 0a:58:5d:5d:01:01 ethertype IPv4 (0x0800), length 80: (tos 0x0, ttl 63, id 57264, + // offset 0, flags [DF], proto TCP (6), length 60) + // 169.154.169.12.36363 > 172.18.0.2.25022: Flags [S], cksum 0xe0b7 (correct), seq 3879759281, win 65280, + // options [mss 1360,sackOK,TS val 3006752321 ecr 0,nop,wscale 7], length 0 + // 10:59:55.352461 ovn-k8s-mp87 Out ifindex 186 0a:58:5d:5d:01:02 ethertype IPv4 (0x0800), length 60: (tos 0x0, ttl 64, id 0, + // offset 0, flags [DF], proto TCP (6), length 40) + // 172.18.0.2.25022 > 169.154.169.12.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 3879759282, win 0, length 0 + // 10:59:55.352927 319594f193d4d_3 Out ifindex 191 0a:58:5d:5d:01:02 ethertype IPv4 (0x0800), length 60: (tos 0x0, ttl 64, id 0, + // offset 0, flags [DF], proto TCP (6), length 40) + // 172.18.0.2.25022 > 93.93.1.5.36363: Flags [R.], cksum 0x609d (correct), seq 0, ack 1, win 0, length 0 + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/hostname", "", true + }), + ginkgo.Entry("UDN pod to a different node should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // podsNetA[0] and podsNetA[2] are on different nodes so we can pick the node of podsNetA[2] as the different node destination + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), podsNetA[2].Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := 
node.Status.Addresses[ipFamilyIndex].Address + + clientNode, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), clientPod.Spec.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + clientNodeIP := clientNode.Status.Addresses[ipFamilyIndex].Address + // pod -> node traffic should use the node's IP as the source for advertised UDNs. + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(hostNetworkPort)) + "/clientip", clientNodeIP, false + }), + ginkgo.Entry("UDN pod to the same node nodeport service in default network should not work", + // FIXME: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5410 + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // podsNetA[0] is on nodes[0]. We need the same node. Let's hit the nodeport on nodes[0]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetDefault.Spec.Ports[0].NodePort + + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", curlConnectionTimeoutCode, true + }), + ginkgo.Entry("UDN pod to a different node nodeport service in default network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // podsNetA[0] is on nodes[0]. We need a different node. podNetDefault is on nodes[1]. + // The service is backed by podNetDefault. Let's hit the nodeport on nodes[2]. 
+ node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetDefault.Spec.Ports[0].NodePort + + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to the same node nodeport service in same UDN network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // The service is backed by pods in podsNetA. + // We want to hit the nodeport on the same node. + // client is on nodes[0]. Let's hit nodeport on nodes[0]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetA.Spec.Ports[0].NodePort + + // The service can be backed by any of the pods in podsNetA, so we can't reliably check the output hostname. + // Just check that the connection is successful. + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to a different node nodeport service in same UDN network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // The service is backed by pods in podsNetA. + // We want to hit the nodeport on a different node. + // client is on nodes[0]. Let's hit nodeport on nodes[2]. 
+ node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetA.Spec.Ports[0].NodePort + + // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), + ginkgo.Entry("UDN pod to the same node nodeport service in different UDN network should not work", + // FIXME: This test should work: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5419 + // This traffic flow is expected to work eventually but doesn't work today on Layer3 (v4 and v6) and Layer2 (v4 only) networks. + // Reason it doesn't work today is because UDN networks don't have MAC bindings for masqueradeIPs of other networks. + // Traffic flow: UDN pod in network A -> samenode nodeIP:nodePort service of networkB + // UDN pod in networkA -> ovn-switch -> ovn-cluster-router (SNAT to masqueradeIP of networkA) -> mpX interface -> + // enters the host and hits IPTables rules to DNAT to clusterIP:Port of service of networkB. + // Then it hits the pkt_mark flows on breth0 and get's sent into networkB's patchport where it hits the GR. + // On the GR we DNAT to backend pod and SNAT to joinIP. + // Reply: Pod replies and now OVN in networkB tries to ARP for the masqueradeIP of networkA which is the source and simply + // fails as it doesn't know how to reach this masqueradeIP. + // There is also inconsistency in behaviour within Layer2 networks for how IPv4 works and how IPv6 works where the traffic + // works on ipv6 because of the flows described below. 
+ func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[0].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetB.Spec.Ports[0].NodePort + out := curlConnectionTimeoutCode + errBool := true + if ipFamilyIndex == ipFamilyV6 && cudnATemplate.Spec.Network.Topology == udnv1.NetworkTopologyLayer2 { + // For Layer2 networks, we have these flows we add on breth0: + // cookie=0xdeff105, duration=173.245s, table=1, n_packets=0, n_bytes=0, idle_age=173, priority=14,icmp6,icmp_type=134 actions=FLOOD + // cookie=0xdeff105, duration=173.245s, table=1, n_packets=8, n_bytes=640, idle_age=4, priority=14,icmp6,icmp_type=136 actions=FLOOD + // which floods the Router Advertisement (RA, type 134) and Neighbor Advertisement (NA, type 136) + // Given on Layer2 the GR has the SNATs for both masqueradeIPs this works perfectly well and + // the networks are able to NDP for the masqueradeIPs for the other networks. + // This doesn't work on Layer3 networks since masqueradeIP SNATs are present on the ovn-cluster-router in that case. + // See the tcpdump on the issue: https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5410 for more details. + out = "" + errBool = false + } + + // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", out, errBool + }), + ginkgo.Entry("UDN pod to a different node nodeport service in different UDN network should work", + func(ipFamilyIndex int) (clientName string, clientNamespace string, dst string, expectedOutput string, expectErr bool) { + clientPod := podsNetA[0] + // The service is backed by podNetB. + // We want to hit the nodeport on a different node from the client. 
+ // client is on nodes[0]. Let's hit nodeport on nodes[2]. + node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodes.Items[2].Name, metav1.GetOptions{}) + framework.ExpectNoError(err) + nodeIP := node.Status.Addresses[ipFamilyIndex].Address + nodePort := svcNetB.Spec.Ports[0].NodePort + + // sourceIP will be joinSubnetIP for nodeports, so only using hostname endpoint + return clientPod.Name, clientPod.Namespace, net.JoinHostPort(nodeIP, fmt.Sprint(nodePort)) + "/hostname", "", false + }), ) }, @@ -915,13 +1098,13 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer3, Layer3: &udnv1.Layer3Config{ Role: "Primary", - Subnets: generateL3Subnets(udnv1.Layer3Subnet{ + Subnets: []udnv1.Layer3Subnet{{ CIDR: "102.102.0.0/16", HostSubnet: 24, - }, udnv1.Layer3Subnet{ + }, { CIDR: "2013:100:200::0/60", HostSubnet: 64, - }), + }}, }, }, }, @@ -935,13 +1118,13 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer3, Layer3: &udnv1.Layer3Config{ Role: "Primary", - Subnets: generateL3Subnets(udnv1.Layer3Subnet{ + Subnets: []udnv1.Layer3Subnet{{ CIDR: "103.103.0.0/16", HostSubnet: 24, - }, udnv1.Layer3Subnet{ + }, { CIDR: "2014:100:200::0/60", HostSubnet: 64, - }), + }}, }, }, }, @@ -958,7 +1141,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{ Role: "Primary", - Subnets: generateL2Subnets("102.102.0.0/16", "2013:100:200::0/60"), + Subnets: udnv1.DualStackCIDRs{"102.102.0.0/16", "2013:100:200::0/60"}, }, }, }, @@ -972,7 +1155,7 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" Topology: udnv1.NetworkTopologyLayer2, Layer2: &udnv1.Layer2Config{ Role: "Primary", - Subnets: generateL2Subnets("103.103.0.0/16", "2014:100:200::0/60"), + Subnets: udnv1.DualStackCIDRs{"103.103.0.0/16", "2014:100:200::0/60"}, 
}, }, }, @@ -980,24 +1163,1148 @@ var _ = ginkgo.DescribeTableSubtree("BGP: isolation between advertised networks" ), ) -func generateL3Subnets(v4, v6 udnv1.Layer3Subnet) []udnv1.Layer3Subnet { - var subnets []udnv1.Layer3Subnet - if isIPv4Supported() { - subnets = append(subnets, v4) +var _ = ginkgo.Describe("BGP: For a VRF-Lite configured network", feature.RouteAdvertisements, func() { + + // testing helpers used throughout this testing node + const ( + // FIXME: each test brings its own topology up, and sometimes zebra on + // external FRR container fails to start on the first attempt for + // unknown reasons delaying the overall availability, so we need to use + // long timeouts + timeout = 240 * time.Second + timeoutNOK = 10 * time.Second + pollingNOK = 1 * time.Second + netexecPort = 8080 + ) + var netexecPortStr = fmt.Sprintf("%d", netexecPort) + testPodToHostnameAndExpect := func(src *corev1.Pod, dstIP, expect string) { + ginkgo.GinkgoHelper() + hostname, err := e2epodoutput.RunHostCmdWithRetries( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/hostname", net.JoinHostPort(dstIP, netexecPortStr)), + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(hostname).To(gomega.Equal(expect)) + } + testPodToClientIP := func(src *corev1.Pod, dstIP string) { + ginkgo.GinkgoHelper() + _, err := e2epodoutput.RunHostCmdWithRetries( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + testPodToClientIPAndExpect := func(src *corev1.Pod, dstIP, expect string) { + ginkgo.GinkgoHelper() + ip, err := e2epodoutput.RunHostCmdWithRetries( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), + framework.Poll, + timeout, + ) + 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) + ip, _, err = net.SplitHostPort(ip) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(ip).To(gomega.Equal(expect)) + } + testContainerToClientIPAndExpect := func(src, dstIP, expect string) { + ginkgo.GinkgoHelper() + gomega.Eventually(func(g gomega.Gomega) { + // FIXME: using ExecK8NodeCommand instead of + // ExecExternalContainerCommand, they arent any + // different but ExecK8NodeCommand is more convinient + ip, err := infraprovider.Get().ExecK8NodeCommand( + src, + []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, + ) + g.Expect(err).NotTo(gomega.HaveOccurred()) + ip, _, err = net.SplitHostPort(ip) + g.Expect(err).NotTo(gomega.HaveOccurred()) + g.Expect(ip).To(gomega.Equal(expect)) + }).WithTimeout(timeout).WithPolling(pollingNOK).Should(gomega.Succeed()) + } + testPodToClientIPNOK := func(src *corev1.Pod, dstIP string) { + gomega.Consistently(func(g gomega.Gomega) { + _, err := e2epodoutput.RunHostCmd( + src.Namespace, + src.Name, + fmt.Sprintf("curl --max-time 2 -g -q -s http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr)), + ) + g.Expect(err).To(gomega.HaveOccurred()) + }).WithTimeout(timeoutNOK).WithPolling(pollingNOK).Should(gomega.Succeed()) + } + testContainerToClientIPNOK := func(src, dstIP string) { + gomega.Consistently(func(g gomega.Gomega) { + _, err := infraprovider.Get().ExecK8NodeCommand( + src, + []string{"curl", "--max-time", "2", "-g", "-q", "-s", fmt.Sprintf("http://%s/clientip", net.JoinHostPort(dstIP, netexecPortStr))}, + ) + g.Expect(err).To(gomega.HaveOccurred()) + }).WithTimeout(timeoutNOK).WithPolling(pollingNOK).Should(gomega.Succeed()) + } + + const ( + baseName = "vrflite" + bgpPeerSubnetIPv4 = "172.36.0.0/16" + bgpPeerSubnetIPv6 = "fc00:f853:ccd:36::/64" + // TODO: test with overlaps but we need better isolation from the infra + // provider, docker `--internal` bridge networks with 
iptables based + // isolation doesn't cut it. macvlan driver might be a better option. + bgpServerSubnetIPv4 = "172.38.0.0/16" + bgpServerSubnetIPv6 = "fc00:f853:ccd:38::/64" + ) + + f := wrappedTestFramework(baseName) + f.SkipNamespaceCreation = true + var ipFamilySet sets.Set[utilnet.IPFamily] + var ictx infraapi.Context + var testBaseName, testSuffix, testNetworkName, bgpServerName string + + ginkgo.BeforeEach(func() { + if !isLocalGWModeEnabled() { + e2eskipper.Skipf("VRF-Lite test cases only supported in Local Gateway mode") + } + ipFamilySet = sets.New(getSupportedIPFamiliesSlice(f.ClientSet)...) + ictx = infraprovider.Get().NewTestContext() + testSuffix = framework.RandomSuffix() + testBaseName = baseName + testSuffix + testNetworkName = testBaseName + bgpServerName = testNetworkName + "-bgpserver" + + // we will create a agnhost server on an extra network peered with BGP + ginkgo.By("Running a BGP network with an agnhost server") + bgpPeerCIDRs := []string{bgpPeerSubnetIPv4, bgpPeerSubnetIPv6} + bgpServerCIDRs := []string{bgpServerSubnetIPv4, bgpServerSubnetIPv6} + gomega.Expect(runBGPNetworkAndServer(f, ictx, testNetworkName, bgpServerName, bgpPeerCIDRs, bgpServerCIDRs)).To(gomega.Succeed()) + }) + + // define networks to test with + const ( + cudnCIDRv4 = "103.103.0.0/16" + cudnCIDRv6 = "2014:100:200::0/60" + ) + var ( + layer3NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: "Primary", + Subnets: []udnv1.Layer3Subnet{{CIDR: cudnCIDRv4, HostSubnet: 24}, {CIDR: cudnCIDRv6, HostSubnet: 64}}, + }, + } + layer2NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: "Primary", + Subnets: udnv1.DualStackCIDRs{cudnCIDRv4, cudnCIDRv6}, + }, + } + ) + + matchL3SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.Layer3Subnet) (out []udnv1.Layer3Subnet) { + for _, subnet := range in { + if 
families.Has(utilnet.IPFamilyOfCIDRString(string(subnet.CIDR))) { + out = append(out, subnet) + } + } + return + } + matchL2SubnetsByIPFamilies := func(families sets.Set[utilnet.IPFamily], in ...udnv1.CIDR) (out []udnv1.CIDR) { + for _, subnet := range in { + if families.Has(utilnet.IPFamilyOfCIDRString(string(subnet))) { + out = append(out, subnet) + } + } + return + } + + networksToTest := []ginkgo.TableEntry{ + ginkgo.Entry("Layer 3", layer3NetworkSpec), + ginkgo.Entry("Layer 2", layer2NetworkSpec), + } + + ginkgo.DescribeTableSubtree("When the tested network is of type", + func(networkSpec *udnv1.NetworkSpec) { + var testNamespace *corev1.Namespace + var testPod *corev1.Pod + + getSameNode := func() string { + return testPod.Spec.NodeName + } + getDifferentNode := func() string { + ginkgo.GinkgoHelper() + nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), f.ClientSet) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get ready schedulable nodes") + for _, node := range nodes.Items { + if node.Name != testPod.Spec.NodeName { + return node.Name + } + } + ginkgo.Fail(fmt.Sprintf("Failed to find a different ready schedulable node than %s", testPod.Spec.NodeName)) + return "" + } + + ginkgo.BeforeEach(func() { + var err error + + switch { + case networkSpec.Layer3 != nil: + networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) + case networkSpec.Layer2 != nil: + networkSpec.Layer2.Subnets = matchL2SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer2.Subnets...) 
+ } + + ginkgo.By("Configuring the namespace and network") + testNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, testNetworkName, cudnAdvertisedVRFLite, networkSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + f.Namespace = testNamespace + + // attach network to the VRF on all nodes + ginkgo.By("Attaching the BGP peer network to the CUDN VRF") + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + network, err := infraprovider.Get().GetNetwork(testNetworkName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + for _, node := range nodeList.Items { + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(node.Name, network) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + _, err = infraprovider.Get().ExecK8NodeCommand(node.Name, []string{"ip", "link", "set", "dev", iface.InfName, "master", testNetworkName}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + // quirk: need to reset IPv6 address + _, err = infraprovider.Get().ExecK8NodeCommand(node.Name, []string{"ip", "address", "add", iface.IPv6 + "/" + iface.IPv6Prefix, "dev", iface.InfName}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + } + }) + + ginkgo.Describe("When a pod runs on the tested network", func() { + ginkgo.BeforeEach(func() { + ginkgo.By("Running a pod on the tested network namespace") + testPod = e2epod.CreateExecPodOrFail( + context.Background(), + f.ClientSet, + testNamespace.Name, + testNamespace.Name+"-netexec-pod", + func(p *corev1.Pod) { + p.Spec.Containers[0].Args = []string{"netexec"} + }, + ) + }) + + ginkgo.DescribeTable("It can reach an external server on the same network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the pod can reach the external server") + bgpServerNetwork, err := 
infraprovider.Get().GetNetwork(bgpServerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(bgpServerName, bgpServerNetwork) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) + gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) + testPodToHostnameAndExpect(testPod, serverIP, bgpServerName) + + ginkgo.By("Ensuring a request from the pod is not SNATed") + testPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(testPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIPAndExpect(testPod, serverIP, testPodIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTable("It can be reached by an external server on the same network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the external server can reach the pod") + bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpServerName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(bgpServerName, bgpServerNetwork) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) + gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) + podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podIP).ToNot(gomega.BeEmpty()) + testContainerToClientIPAndExpect(bgpServerName, podIP, serverIP) + }, + ginkgo.Entry("When the network is IPv4", 
utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.It("Can reach KAPI service", func() { + ginkgo.By("Ensuring a request from the pod can reach KAPI service") + output, err := e2epodoutput.RunHostCmdWithRetries( + testPod.Namespace, + testPod.Name, + "curl --max-time 2 -g -q -s -k https://kubernetes.default/healthz", + framework.Poll, + timeout, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(output).To(gomega.Equal("ok")) + }) + + ginkgo.DescribeTable("It cannot reach an external server on a different network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the pod cannot reach the external server") + // using the external server setup for the default network + bgpServerNetwork, err := infraprovider.Get().GetNetwork(bgpExternalNetworkName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + iface, err := infraprovider.Get().GetK8NodeNetworkInterface(serverContainerName, bgpServerNetwork) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + serverIP := getFirstIPStringOfFamily(family, []string{iface.IPv4, iface.IPv6}) + gomega.Expect(serverIP).NotTo(gomega.BeEmpty()) + testPodToClientIPNOK(testPod, serverIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTable("It cannot be reached by an external server on a different network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the external server cannot reach the pod") + podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podIP).ToNot(gomega.BeEmpty()) + // using the external 
server setup for the default network + testContainerToClientIPNOK(serverContainerName, podIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTableSubtree("It cannot be reached by a cluster node", + func(getNode func() string) { + ginkgo.DescribeTable("", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the node cannot reach the tested network pod") + podIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(podIP).ToNot(gomega.BeEmpty()) + testContainerToClientIPNOK(getNode(), podIP) + }, + ginkgo.Entry("When the network is IPv4", utilnet.IPv4), + ginkgo.Entry("When the network is IPv6", utilnet.IPv6), + ) + }, + ginkgo.Entry("When it is the same node", getSameNode), + ginkgo.Entry("When it is a different node", getDifferentNode), + ) + + ginkgo.DescribeTableSubtree("When other pod runs on the tested network", + func(getNode func() string) { + var otherPod *corev1.Pod + + ginkgo.BeforeEach(func() { + ginkgo.By("Running other pod on the tested network namespace") + otherPod = e2epod.CreateExecPodOrFail( + context.Background(), + f.ClientSet, + testNamespace.Name, + testNamespace.Name+"-netexec-pod", + func(p *corev1.Pod) { + p.Spec.Containers[0].Args = []string{"netexec"} + p.Labels = map[string]string{"app": "netexec-pod"} + }, + ) + }) + + ginkgo.DescribeTable("The pods on the tested network can reach each other", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the first pod can reach the second pod") + otherPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + 
otherPod.Namespace, + otherPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(otherPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIP(testPod, otherPodIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + + ginkgo.Describe("Backing a ClusterIP service", func() { + var service *corev1.Service + + ginkgo.BeforeEach(func() { + ginkgo.By("Creating a service backed by the other network pod") + service = e2eservice.CreateServiceSpec( + "service-for-netexec", + "", + false, + otherPod.Labels, + ) + service.Spec.Ports = []corev1.ServicePort{{Port: netexecPort}} + familyPolicy := corev1.IPFamilyPolicyPreferDualStack + service.Spec.IPFamilyPolicy = &familyPolicy + var err error + service, err = f.ClientSet.CoreV1().Services(otherPod.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.DescribeTable("The first pod can reach the ClusterIP service on the same network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the first pod can reach the ClusterIP service") + clusterIP := getFirstIPStringOfFamily(family, service.Spec.ClusterIPs) + gomega.Expect(clusterIP).ToNot(gomega.BeEmpty()) + testPodToClientIP(testPod, clusterIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + }) + }, + ginkgo.Entry("On the same node", getSameNode), + ginkgo.Entry("On a different node", getDifferentNode), + ) + + ginkgo.Describe("When there is other network", func() { + const ( + otherBGPPeerSubnetIPv4 = "172.136.0.0/16" + otherBGPPeerSubnetIPv6 = "fc00:f853:ccd:136::/64" + otherBGPServerSubnetIPv4 = "172.138.0.0/16" + otherBGPServerSubnetIPv6 = "fc00:f853:ccd:138::/64" + 
otherUDNCIDRv4 = "103.203.0.0/16" + otherUDNCIDRv6 = "2014:200:200::0/60" + ) + + var ( + otherLayer3NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer3, + Layer3: &udnv1.Layer3Config{ + Role: "Primary", + Subnets: []udnv1.Layer3Subnet{{CIDR: otherUDNCIDRv4, HostSubnet: 24}, {CIDR: otherUDNCIDRv6, HostSubnet: 64}}, + }, + } + otherLayer2NetworkSpec = &udnv1.NetworkSpec{ + Topology: udnv1.NetworkTopologyLayer2, + Layer2: &udnv1.Layer2Config{ + Role: "Primary", + Subnets: udnv1.DualStackCIDRs{otherUDNCIDRv4, otherUDNCIDRv6}, + }, + } + ) + + otherNetworksToTest := []ginkgo.TableEntry{ + ginkgo.Entry("Default", defaultNetwork, nil), + ginkgo.Entry("Layer 3 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer3NetworkSpec), + ginkgo.Entry("Layer 2 CUDN advertised VRF-Lite", cudnAdvertisedVRFLite, otherLayer2NetworkSpec), + // The following testcases are labeled as extended, + // might not be run on all jobs + ginkgo.Entry("Layer 3 UDN non advertised", udn, otherLayer3NetworkSpec, label.Extended()), + ginkgo.Entry("Layer 3 CUDN advertised", cudnAdvertised, otherLayer3NetworkSpec, label.Extended()), + ginkgo.Entry("Layer 2 UDN non advertised", udn, otherLayer2NetworkSpec, label.Extended()), + ginkgo.Entry("Layer 2 CUDN advertised", cudnAdvertised, otherLayer2NetworkSpec, label.Extended()), + } + + ginkgo.DescribeTableSubtree("Of type", + func(networkType networkType, networkSpec *udnv1.NetworkSpec) { + var otherNamespace *corev1.Namespace + var otherNetworkName string + + ginkgo.BeforeEach(func() { + otherNetworkName = testBaseName + "-other" + otherNamespaceName := otherNetworkName + + switch { + case networkSpec == nil: + // noop + case networkSpec.Layer3 != nil: + networkSpec.Layer3.Subnets = matchL3SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer3.Subnets...) + case networkSpec.Layer2 != nil: + networkSpec.Layer2.Subnets = matchL2SubnetsByIPFamilies(ipFamilySet, networkSpec.Layer2.Subnets...) 
+ } + + // we will create a agnhost server on an extra network peered with BGP + switch networkType { + case cudnAdvertisedVRFLite: + ginkgo.By("Running other BGP network with an agnhost server") + otherBGPServerName := otherNetworkName + "-bgpserver" + bgpPeerCIDRs := []string{otherBGPPeerSubnetIPv4, otherBGPPeerSubnetIPv6} + bgpServerCIDRs := []string{otherBGPServerSubnetIPv4, otherBGPServerSubnetIPv6} + gomega.Expect(runBGPNetworkAndServer(f, ictx, otherNetworkName, otherBGPServerName, bgpPeerCIDRs, bgpServerCIDRs)).To(gomega.Succeed()) + case defaultNetwork: + otherNetworkName = "default" + } + + ginkgo.By("Creating the other namespace and network") + var err error + otherNamespace, err = createNamespaceWithPrimaryNetworkOfType(f, ictx, testBaseName, otherNamespaceName, networkType, networkSpec) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.DescribeTableSubtree("And a pod runs on the other network", + func(getNode func() string) { + var otherPod *corev1.Pod + + ginkgo.BeforeEach(func() { + ginkgo.By("Running a pod on the other network namespace") + otherPod = e2epod.CreateExecPodOrFail( + context.Background(), + f.ClientSet, + otherNamespace.Name, + otherNamespace.Name+"-netexec-pod", + func(p *corev1.Pod) { + p.Spec.Containers[0].Args = []string{"netexec"} + p.Spec.NodeName = getNode() + p.Labels = map[string]string{"app": "netexec-pod"} + }, + ) + }) + + ginkgo.DescribeTable("The pod on the tested network cannot reach the pod on the other network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") + otherPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + otherPod.Namespace, + otherPod.Name, + otherNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(otherPodIP).ToNot(gomega.BeEmpty()) + 
testPodToClientIPNOK(testPod, otherPodIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + + ginkgo.DescribeTable("The pod on the other network cannot reach the pod on the tested network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the other network pod cannot reach the tested network pod") + testPodIP, err := getPodAnnotationIPsForPrimaryNetworkByIPFamily( + f.ClientSet, + testPod.Namespace, + testPod.Name, + testNetworkName, + family, + ) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(testPodIP).ToNot(gomega.BeEmpty()) + testPodToClientIPNOK(otherPod, testPodIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + + ginkgo.Describe("Backing a ClusterIP service", func() { + var service *corev1.Service + + ginkgo.BeforeEach(func() { + ginkgo.By("Creating a service backed by the other network pod") + service = e2eservice.CreateServiceSpec( + "service-for-netexec", + "", + false, + otherPod.Labels, + ) + service.Spec.Ports = []corev1.ServicePort{{Port: netexecPort}} + familyPolicy := corev1.IPFamilyPolicyPreferDualStack + service.Spec.IPFamilyPolicy = &familyPolicy + var err error + service, err = f.ClientSet.CoreV1().Services(otherPod.Namespace).Create(context.Background(), service, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.DescribeTable("The pod on the tested network cannot reach the service on the other network", + func(family utilnet.IPFamily) { + if !ipFamilySet.Has(family) { + e2eskipper.Skipf("IP family %v not supported", family) + } + ginkgo.By("Ensuring a request from the tested network pod cannot reach the other network pod") + clusterIP := getFirstIPStringOfFamily(family, service.Spec.ClusterIPs) + 
gomega.Expect(clusterIP).ToNot(gomega.BeEmpty()) + testPodToClientIPNOK(testPod, clusterIP) + }, + ginkgo.Entry("When the networks are IPv4", utilnet.IPv4), + ginkgo.Entry("When the networks are IPv6", utilnet.IPv6), + ) + }) + }, + ginkgo.Entry("On the same node", getSameNode), + ginkgo.Entry("On a different node", getDifferentNode), + ) + }, + otherNetworksToTest, + ) + }) + }) + }, + networksToTest, + ) +}) + +// routeAdvertisementsReadyFunc returns a function that checks for the +// Accepted condition in the provided RouteAdvertisements +func routeAdvertisementsReadyFunc(c raclientset.Clientset, name string) func() error { + return func() error { + ra, err := c.K8sV1().RouteAdvertisements().Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return err + } + conditionType := "Accepted" + condition := meta.FindStatusCondition(ra.Status.Conditions, conditionType) + if condition == nil { + return fmt.Errorf("no %q condition found in: %v", conditionType, ra) + } + if condition.Status != metav1.ConditionTrue { + return fmt.Errorf("condition %v has unexpected status %v", condition, condition.Status) + } + return nil + } +} + +// templateInputRouter data +type templateInputRouter struct { + VRF string + NeighborsIPv4 []string + NeighborsIPv6 []string + NetworksIPv4 []string + NetworksIPv6 []string +} + +// templateInputFRR data +type templateInputFRR struct { + // Name and Label are used for FRRConfiguration metadata + Name string + Labels map[string]string + Routers []templateInputRouter +} + +// for routeadvertisements test cases we generate configuration from templates embed in the program +// +//go:embed testdata/routeadvertisements +var ratestdata embed.FS +var tmplDir = filepath.Join("testdata", "routeadvertisements") + +const frrImage = "quay.io/frrouting/frr:9.1.3" + +// generateFRRConfiguration to establish a BGP session towards the provided +// neighbors in the network's VRF configured to advertised the provided +// networks. 
Returns a temporary directory where the configuration is generated. +func generateFRRConfiguration(neighborIPs, advertiseNetworks []string) (directory string, err error) { + // parse configuration templates + var templates *template.Template + templates, err = template.ParseFS(ratestdata, filepath.Join(tmplDir, "frr", "*.tmpl")) + if err != nil { + return "", fmt.Errorf("failed to parse templates: %w", err) + } + + // create the directory that will hold the configuration files + directory, err = os.MkdirTemp("", "frrconf-") + if err != nil { + return "", fmt.Errorf("failed to make temp directory: %w", err) + } + defer func() { + if err != nil { + os.RemoveAll(directory) + } + }() + + // generate external frr configuration executing the templates + networksIPv4, networksIPv6 := splitCIDRStringsByIPFamily(advertiseNetworks) + neighborsIPv4, neighborsIPv6 := splitIPStringsByIPFamily(neighborIPs) + conf := templateInputFRR{ + Routers: []templateInputRouter{ + { + NeighborsIPv4: neighborsIPv4, + NetworksIPv4: networksIPv4, + NeighborsIPv6: neighborsIPv6, + NetworksIPv6: networksIPv6, + }, + }, + } + + err = executeFileTemplate(templates, directory, "frr.conf", conf) + if err != nil { + return "", fmt.Errorf("failed to execute template %q: %w", "frr.conf", err) + } + err = executeFileTemplate(templates, directory, "daemons", nil) + if err != nil { + return "", fmt.Errorf("failed to execute template %q: %w", "daemons", err) + } + + return directory, nil +} + +// generateFRRk8sConfiguration for the provided network (which doubles up as the +// FRRConfiguration instance name, VRF name and used as value of `network` +// label) to establish a BGP session towards the provided neighbors in the +// network's VRF, configured to receive advertisements for the provided +// networks. Returns a temporary directory where the configuration is generated. 
+func generateFRRk8sConfiguration(networkName string, neighborIPs, receiveNetworks []string) (directory string, err error) { + // parse configuration templates + var templates *template.Template + templates, err = template.ParseFS(ratestdata, filepath.Join(tmplDir, "frr-k8s", "*.tmpl")) + if err != nil { + return "", fmt.Errorf("failed to parse templates: %w", err) + } + + // create the directory that will hold the configuration files + directory, err = os.MkdirTemp("", "frrk8sconf-") + if err != nil { + return "", fmt.Errorf("failed to make temp directory: %w", err) + } + defer func() { + if err != nil { + os.RemoveAll(directory) + } + }() + + receivesIPv4, receivesIPv6 := splitCIDRStringsByIPFamily(receiveNetworks) + neighborsIPv4, neighborsIPv6 := splitIPStringsByIPFamily(neighborIPs) + conf := templateInputFRR{ + Name: networkName, + Labels: map[string]string{"network": networkName}, + Routers: []templateInputRouter{ + { + VRF: networkName, + NeighborsIPv4: neighborsIPv4, + NeighborsIPv6: neighborsIPv6, + NetworksIPv4: receivesIPv4, + NetworksIPv6: receivesIPv6, + }, + }, + } + err = executeFileTemplate(templates, directory, "frrconf.yaml", conf) + if err != nil { + return "", fmt.Errorf("failed to execute template %q: %w", "frrconf.yaml", err) + } + + return directory, nil +} + +// runBGPNetworkAndServer configures a topology appropriate to be used with +// route advertisement test cases. For VRF-Lite test cases, the caller is +// resposible to attach the peer network interface to the CUDN VRF on the nodes. 
+// +// ----------------- ------------------ --------------- +// | | serverNetwork | | peerNetwork | | +// | external |<--------------- | FRR router |<--( Default / CUDN VRF )-- | cluster | +// | server | | | | | +// ----------------- ------------------ --------------- +func runBGPNetworkAndServer( + f *framework.Framework, + ictx infraapi.Context, + networkName, serverName string, + peerNetworks, + serverNetworks []string, +) error { + // filter networks by supported IP families + families := getSupportedIPFamiliesSlice(f.ClientSet) + peerNetworks = matchCIDRStringsByIPFamily(peerNetworks, families...) + serverNetworks = matchCIDRStringsByIPFamily(serverNetworks, families...) + + // create BGP peer network + bgpPeerNetwork, err := ictx.CreateNetwork(networkName, peerNetworks...) + if err != nil { + return fmt.Errorf("failed to create peer network %v: %w", peerNetworks, err) + } + + // create the server network + serverNetwork, err := ictx.CreateNetwork(serverName, serverNetworks...) + if err != nil { + return fmt.Errorf("failed to create server network %v: %w", serverNetworks, err) + } + + // attach BGP peer network to all nodes + var nodeIPs []string + nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + if err != nil { + return fmt.Errorf("failed to list nodes: %w", err) + } + for _, node := range nodeList.Items { + iface, err := ictx.AttachNetwork(bgpPeerNetwork, node.Name) + if err != nil { + return fmt.Errorf("failed to attach node %q to network: %w", node.Name, err) + } + nodeIPs = append(nodeIPs, iface.IPv4, iface.IPv6) + } + + // run frr container + advertiseNetworks := serverNetworks + frrConfig, err := generateFRRConfiguration(nodeIPs, advertiseNetworks) + if err != nil { + return fmt.Errorf("failed to generate FRR configuration: %w", err) + } + ictx.AddCleanUpFn(func() error { return os.RemoveAll(frrConfig) }) + frr := infraapi.ExternalContainer{ + Name: networkName + "-frr", + Image: frrImage, + Network: 
bgpPeerNetwork, + RuntimeArgs: []string{"--volume", frrConfig + ":" + filepath.Join(filepath.FromSlash("/"), "etc", "frr")}, + } + frr, err = ictx.CreateExternalContainer(frr) + if err != nil { + return fmt.Errorf("failed to create frr container: %w", err) + } + // enable IPv6 forwarding if required + if frr.IPv6 != "" { + _, err = infraprovider.Get().ExecExternalContainerCommand(frr, []string{"sysctl", "-w", "net.ipv6.conf.all.forwarding=1"}) + if err != nil { + return fmt.Errorf("failed to set enable IPv6 forwading on frr container: %w", err) + } + } + + // connect frr to server network + frrServerNetworkInterface, err := ictx.AttachNetwork(serverNetwork, frr.Name) + if err != nil { + return fmt.Errorf("failed to connect frr to server network: %w", err) + } + + // run server container + server := infraapi.ExternalContainer{ + Name: serverName, + Image: images.AgnHost(), + CmdArgs: []string{"netexec"}, + Network: serverNetwork, + } + _, err = ictx.CreateExternalContainer(server) + if err != nil { + return fmt.Errorf("failed to create BGP server container: %w", err) + } + + // set frr as default gateway for the server + if frrServerNetworkInterface.IPv4 != "" { + _, err = infraprovider.Get().ExecExternalContainerCommand(server, []string{"ip", "route", "add", "default", "via", frrServerNetworkInterface.IPv4}) + if err != nil { + return fmt.Errorf("failed to set default IPv4 gateway on BGP server container: %w", err) + } + } + if frrServerNetworkInterface.IPv6 != "" { + _, err = infraprovider.Get().ExecExternalContainerCommand(server, []string{"ip", "-6", "route", "add", "default", "via", frrServerNetworkInterface.IPv6}) + if err != nil { + return fmt.Errorf("failed to set default IPv6 gateway on BGP server container: %w", err) + } + + } + + // apply FRR-K8s Configuration + receiveNetworks := serverNetworks + frrK8sConfig, err := generateFRRk8sConfiguration(networkName, []string{frr.IPv4, frr.IPv6}, receiveNetworks) + if err != nil { + return fmt.Errorf("failed to 
generate FRR-k8s configuration: %w", err) + } + ictx.AddCleanUpFn(func() error { return os.RemoveAll(frrK8sConfig) }) + _, err = e2ekubectl.RunKubectl(deploymentconfig.Get().FRRK8sNamespace(), "create", "-f", frrK8sConfig) + if err != nil { + return fmt.Errorf("failed to apply FRRConfiguration: %w", err) + } + ictx.AddCleanUpFn(func() error { + _, err = e2ekubectl.RunKubectl(deploymentconfig.Get().FRRK8sNamespace(), "delete", "-f", frrK8sConfig) + if err != nil { + return fmt.Errorf("failed to delete FRRConfiguration: %w", err) + } + return nil + }) + + return nil +} + +type networkType string + +const ( + defaultNetwork networkType = "DEFAULT" + udn networkType = "UDN" + cudn networkType = "CUDN" + cudnAdvertised networkType = "CUDN_ADVERTISED" + cudnAdvertisedVRFLite networkType = "CUDN_ADVERTISED_VRFLITE" +) + +// createNamespaceWithPrimaryNetworkOfType helper function configures a +// namespace, a optional(C)UDN and an optional RouteAdvertisements as determined +// by `networkType` argument. The RouteAdvertisements is aligned with the +// configuration done with `runBGPNetworkAndServer` for VRF-Lite scenarios. 
+func createNamespaceWithPrimaryNetworkOfType( + f *framework.Framework, + ictx infraapi.Context, + test, name string, + networkType networkType, + networkSpec *udnv1.NetworkSpec, +) (*corev1.Namespace, error) { + // define some configuration based on the type of namespace/network/advertisement + var targetVRF string + var networkLabels map[string]string + var frrConfigurationLabels map[string]string + switch networkType { + case cudnAdvertised: + networkLabels = map[string]string{"advertise": name} + frrConfigurationLabels = map[string]string{"name": "receive-all"} + case cudnAdvertisedVRFLite: + targetVRF = name + networkLabels = map[string]string{"advertise": name} + frrConfigurationLabels = map[string]string{"network": name} + } + + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "e2e-framework": test, + }, + }, + } + if networkType != defaultNetwork { + namespace.Labels[RequiredUDNNamespaceLabel] = "" + } + namespace, err := f.ClientSet.CoreV1().Namespaces().Create( + context.Background(), + namespace, + metav1.CreateOptions{}, + ) + if err != nil { + return nil, fmt.Errorf("failed to create namespace: %w", err) + } + ictx.AddCleanUpFn(func() error { + return f.ClientSet.CoreV1().Namespaces().Delete(context.Background(), namespace.Name, metav1.DeleteOptions{}) + }) + + // just creating a namespace with default network, return + if networkType == defaultNetwork { + return namespace, nil + } + + err = createUserDefinedNetwork( + f, + ictx, + namespace, + name, + networkType != udn, + networkSpec, + networkLabels, + ) + if err != nil { + return nil, fmt.Errorf("failed to create primary network: %w", err) + } + + // not advertised, return + if networkType == udn || networkType == cudn { + return namespace, nil + } + + err = createRouteAdvertisements( + f, + ictx, + name, + targetVRF, + networkLabels, + frrConfigurationLabels, + ) + if err != nil { + return nil, fmt.Errorf("failed to create primary 
network: %w", err) + } + + return namespace, nil +} + +func createUserDefinedNetwork( + f *framework.Framework, + ictx infraapi.Context, + namespace *corev1.Namespace, + name string, + cudnType bool, + networkSpec *udnv1.NetworkSpec, + networkLabels map[string]string, +) error { + var gvr schema.GroupVersionResource + var gvk schema.GroupVersionKind + var obj runtime.Object + var client dynamic.ResourceInterface + switch { + case cudnType: + gvr = clusterUDNGVR + gvk = schema.GroupVersionKind{ + Group: gvr.Group, + Version: gvr.Version, + Kind: "ClusterUserDefinedNetwork", + } + client = f.DynamicClient.Resource(gvr) + obj = &udnv1.ClusterUserDefinedNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: networkLabels, + }, + Spec: udnv1.ClusterUserDefinedNetworkSpec{ + NamespaceSelector: metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{ + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: []string{namespace.Name}, + }}}, + Network: *networkSpec, + }, + } + default: + gvr = udnGVR + gvk = schema.GroupVersionKind{ + Group: gvr.Group, + Version: gvr.Version, + Kind: "UserDefinedNetwork", + } + client = f.DynamicClient.Resource(gvr).Namespace(namespace.Name) + obj = &udnv1.UserDefinedNetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace.Name, + Labels: networkLabels, + }, + Spec: udnv1.UserDefinedNetworkSpec{ + Topology: networkSpec.Topology, + Layer3: networkSpec.Layer3, + Layer2: networkSpec.Layer2, + }, + } } - if isIPv6Supported() { - subnets = append(subnets, v6) + + unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return fmt.Errorf("failed to convert network to unstructured: %w", err) } - return subnets + unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap} + ok := unstructuredObj.GetObjectKind() + ok.SetGroupVersionKind(gvk) + + _, err = client.Create(context.Background(), unstructuredObj, 
metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to convert network to unstructured: %w", err) + } + ictx.AddCleanUpFn(func() error { + return client.Delete(context.Background(), name, metav1.DeleteOptions{}) + }) + wait.PollUntilContextTimeout( + context.Background(), + time.Second, + 5*time.Second, + true, + func(ctx context.Context) (bool, error) { + err = networkReadyFunc(client, name)() + return err == nil, nil + }, + ) + if err != nil { + return fmt.Errorf("failed to wait for the network to be ready: %w", err) + } + + return nil } -func generateL2Subnets(v4, v6 string) udnv1.DualStackCIDRs { - var subnets udnv1.DualStackCIDRs - if isIPv4Supported() { - subnets = append(subnets, udnv1.CIDR(v4)) +func createRouteAdvertisements( + f *framework.Framework, + ictx infraapi.Context, + name string, + targetVRF string, + networkMatchLabels map[string]string, + frrconfigurationMatchLabels map[string]string, +) error { + ra := &rav1.RouteAdvertisements{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: rav1.RouteAdvertisementsSpec{ + NetworkSelectors: apitypes.NetworkSelectors{ + apitypes.NetworkSelector{ + NetworkSelectionType: apitypes.ClusterUserDefinedNetworks, + ClusterUserDefinedNetworkSelector: &apitypes.ClusterUserDefinedNetworkSelector{ + NetworkSelector: metav1.LabelSelector{ + MatchLabels: networkMatchLabels, + }, + }, + }, + }, + FRRConfigurationSelector: metav1.LabelSelector{ + MatchLabels: frrconfigurationMatchLabels, + }, + NodeSelector: metav1.LabelSelector{}, + Advertisements: []rav1.AdvertisementType{ + rav1.PodNetwork, + }, + TargetVRF: targetVRF, + }, } - if isIPv6Supported() { - subnets = append(subnets, udnv1.CIDR(v6)) + + raClient, err := raclientset.NewForConfig(f.ClientConfig()) + if err != nil { + return fmt.Errorf("failed to create RouteAdvertisements client: %w", err) } - return subnets + _, err = raClient.K8sV1().RouteAdvertisements().Create(context.TODO(), ra, metav1.CreateOptions{}) + if err != nil { + return 
fmt.Errorf("failed to create RouteAdvertisements: %w", err) + } + ictx.AddCleanUpFn(func() error { + return raClient.K8sV1().RouteAdvertisements().Delete(context.Background(), name, metav1.DeleteOptions{}) + }) + wait.PollUntilContextTimeout( + context.Background(), + time.Second, + 5*time.Second, + true, + func(ctx context.Context) (bool, error) { + err = routeAdvertisementsReadyFunc(*raClient, name)() + return err == nil, nil + }, + ) + if err != nil { + return fmt.Errorf("failed to wait for the RouteAdvertisements to be ready: %w", err) + } + + return nil } diff --git a/test/e2e/service.go b/test/e2e/service.go index 664a01e8ea..6e3ff61c27 100644 --- a/test/e2e/service.go +++ b/test/e2e/service.go @@ -17,6 +17,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "github.com/ovn-org/ovn-kubernetes/test/e2e/deploymentconfig" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" "github.com/ovn-org/ovn-kubernetes/test/e2e/images" "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider" infraapi "github.com/ovn-org/ovn-kubernetes/test/e2e/infraprovider/api" @@ -52,7 +53,7 @@ var ( reportPath string ) -var _ = ginkgo.Describe("Services", func() { +var _ = ginkgo.Describe("Services", feature.Service, func() { const ( serviceName = "testservice" echoServerPodNameTemplate = "echo-server-pod-%d" @@ -811,7 +812,7 @@ var _ = ginkgo.Describe("Services", func() { framework.ExpectNoError(err, "failed to get primary network") externalContainerPort := infraprovider.Get().GetExternalContainerPort() externalContainer := infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} + CmdArgs: getAgnHostHTTPPortBindCMDArgs(externalContainerPort), ExtPort: externalContainerPort} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "external container %s must be 
created", externalContainer.Name) @@ -1010,7 +1011,7 @@ var _ = ginkgo.Describe("Services", func() { Name: targetSecondaryContainerName, Image: images.AgnHost(), Network: secondaryProviderNetwork, - Args: getAgnHostHTTPPortBindCMDArgs(serverExternalContainerPort), + CmdArgs: getAgnHostHTTPPortBindCMDArgs(serverExternalContainerPort), ExtPort: serverExternalContainerPort, } serverExternalContainer, err := providerCtx.CreateExternalContainer(serverExternalContainerSpec) @@ -1314,7 +1315,7 @@ spec: ginkgo.By("Creating an external client") externalContainer := infraapi.ExternalContainer{Name: clientContainerName, Image: images.AgnHost(), Network: primaryProviderNetwork, - Args: []string{"pause"}, ExtPort: infraprovider.Get().GetExternalContainerPort()} + CmdArgs: []string{"pause"}, ExtPort: infraprovider.Get().GetExternalContainerPort()} externalContainer, err = providerCtx.CreateExternalContainer(externalContainer) framework.ExpectNoError(err, "failed to create external container", externalContainer) @@ -1424,7 +1425,7 @@ func getServiceBackendsFromPod(execPod *v1.Pod, serviceIP string, servicePort in // service ip; if the traffic was DNAT-ed to the same src pod (hairpin/loopback case) - // the srcIP of reply traffic is SNATed to the special masqurade IP 169.254.0.5 // or "fd69::5" -var _ = ginkgo.Describe("Service Hairpin SNAT", func() { +var _ = ginkgo.Describe("Service Hairpin SNAT", feature.Service, func() { const ( svcName = "service-hairpin-test" backendName = "hairpin-backend-pod" @@ -1522,7 +1523,7 @@ var _ = ginkgo.Describe("Service Hairpin SNAT", func() { }) -var _ = ginkgo.Describe("Load Balancer Service Tests with MetalLB", func() { +var _ = ginkgo.Describe("Load Balancer Service Tests with MetalLB", feature.Service, func() { const ( svcName = "lbservice-test" diff --git a/test/e2e/status_manager.go b/test/e2e/status_manager.go index b6e7a9bfeb..bae96224ae 100644 --- a/test/e2e/status_manager.go +++ b/test/e2e/status_manager.go @@ -9,6 +9,7 @@ import ( 
"github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -16,7 +17,7 @@ import ( e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" ) -var _ = ginkgo.Describe("Status manager validation", func() { +var _ = ginkgo.Describe("Status manager validation", feature.EgressFirewall, func() { const ( svcname string = "status-manager" egressFirewallYamlFile string = "egress-fw.yml" diff --git a/test/e2e/testcontext.go b/test/e2e/testcontext.go new file mode 100644 index 0000000000..1b8104ab44 --- /dev/null +++ b/test/e2e/testcontext.go @@ -0,0 +1,143 @@ +package e2e + +import ( + "errors" + "os" + "path" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/reporters" + ginkgotypes "github.com/onsi/ginkgo/v2/types" + "github.com/onsi/gomega" + + "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e/framework" +) + +// ProcessTestContextAndSetupLogging copied up k8 e2e test framework pkg because we need to remove the label check. +func ProcessTestContextAndSetupLogging() { + t := &framework.TestContext + // default copied from k8 e2e test framework pkg + // Reconfigure gomega defaults. The poll interval should be suitable + // for most tests. The timeouts are more subjective and tests may want + // to override them, but these defaults are still better for E2E than the + // ones from Gomega (1s timeout, 10ms interval). 
+ var defaultTimeouts = framework.TimeoutContext{ + Poll: 2 * time.Second, // from the former e2e/framework/pod poll interval + PodStart: 5 * time.Minute, + PodStartShort: 2 * time.Minute, + PodStartSlow: 15 * time.Minute, + PodDelete: 5 * time.Minute, + ClaimProvision: 5 * time.Minute, + ClaimProvisionShort: 1 * time.Minute, + DataSourceProvision: 5 * time.Minute, + ClaimBound: 3 * time.Minute, + PVReclaim: 3 * time.Minute, + PVBound: 3 * time.Minute, + PVCreate: 3 * time.Minute, + PVDelete: 5 * time.Minute, + PVDeleteSlow: 20 * time.Minute, + SnapshotCreate: 5 * time.Minute, + SnapshotDelete: 5 * time.Minute, + SnapshotControllerMetrics: 5 * time.Minute, + SystemPodsStartup: 10 * time.Minute, + NodeSchedulable: 30 * time.Minute, + SystemDaemonsetStartup: 5 * time.Minute, + NodeNotReady: 3 * time.Minute, + } + gomega.SetDefaultEventuallyPollingInterval(defaultTimeouts.Poll) + gomega.SetDefaultConsistentlyPollingInterval(defaultTimeouts.Poll) + gomega.SetDefaultEventuallyTimeout(defaultTimeouts.PodStart) + gomega.SetDefaultConsistentlyDuration(defaultTimeouts.PodStartShort) + + // Allow 1% of nodes to be unready (statistically) - relevant for large clusters. + if t.AllowedNotReadyNodes == 0 { + t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100 + } + + // Make sure that all test runs have a valid TestContext.CloudConfig.Provider. + // TODO: whether and how long this code is needed is getting discussed + // in https://github.com/kubernetes/kubernetes/issues/70194. + if t.Provider == "" { + t.Provider = "skeleton" + } + + var err error + t.CloudConfig.Provider, err = framework.SetupProviderConfig(t.Provider) + if err != nil { + if os.IsNotExist(errors.Unwrap(err)) { + klog.Errorf("Unknown provider %q. ", t.Provider) + } else { + klog.Errorf("Failed to setup provider config for %q: %v", t.Provider, err) + } + os.Exit(1) + } + + if t.ReportDir != "" { + // Create the directory before running the suite. 
If + // --report-dir is not unusable, we should report + // that as soon as possible. This will be done by each worker + // in parallel, so we will get "exists" error in most of them. + if err := os.MkdirAll(t.ReportDir, 0777); err != nil && !os.IsExist(err) { + klog.Errorf("Create report dir: %v", err) + os.Exit(1) + } + ginkgoDir := path.Join(t.ReportDir, "ginkgo") + if t.ReportCompleteGinkgo || t.ReportCompleteJUnit { + if err := os.MkdirAll(ginkgoDir, 0777); err != nil && !os.IsExist(err) { + klog.Errorf("Create /ginkgo: %v", err) + os.Exit(1) + } + } + + if t.ReportCompleteGinkgo { + ginkgo.ReportAfterSuite("Ginkgo JSON report", func(report ginkgo.Report) { + gomega.Expect(reporters.GenerateJSONReport(report, path.Join(ginkgoDir, "report.json"))).NotTo(gomega.HaveOccurred()) + }) + ginkgo.ReportAfterSuite("JUnit XML report", func(report ginkgo.Report) { + gomega.Expect(reporters.GenerateJUnitReport(report, path.Join(ginkgoDir, "report.xml"))).NotTo(gomega.HaveOccurred()) + }) + } + + ginkgo.ReportAfterSuite("OVN-Kubernetes e2e JUnit report", func(report ginkgo.Report) { + // With Ginkgo v1, we used to write one file per + // parallel node. Now Ginkgo v2 automatically merges + // all results into a report for us. The 01 suffix is + // kept in case that users expect files to be called + // "junit_.xml". + junitReport := path.Join(t.ReportDir, "junit_"+t.ReportPrefix+"01.xml") + + // writeJUnitReport generates a JUnit file in the e2e + // report directory that is shorter than the one + // normally written by `ginkgo --junit-report`. This is + // needed because the full report can become too large + // for tools like Spyglass + // (https://github.com/kubernetes/kubernetes/issues/111510). + gomega.Expect(writeJUnitReport(report, junitReport)).NotTo(gomega.HaveOccurred()) + }) + } +} + +// writeJUnitReport generates a JUnit file that is shorter than the one +// normally written by `ginkgo --junit-report`. 
This is needed because the full +// report can become too large for tools like Spyglass +// (https://github.com/kubernetes/kubernetes/issues/111510). +func writeJUnitReport(report ginkgo.Report, filename string) error { + config := reporters.JunitReportConfig{ + // Remove details for specs where we don't care. + OmitTimelinesForSpecState: ginkgotypes.SpecStatePassed | ginkgotypes.SpecStateSkipped, + + // Don't write . The same text is + // also in the full text for the failure. If we were to write + // both, then tools like kettle and spyglass would concatenate + // the two strings and thus show duplicated information. + OmitFailureMessageAttr: true, + + // All labels are also part of the spec texts in inline [] tags, + // so we don't need to write them separately. + OmitSpecLabels: true, + } + + return reporters.GenerateJUnitReportWithConfig(report, filename, config) +} diff --git a/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl b/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl new file mode 100644 index 0000000000..ba4b4605ad --- /dev/null +++ b/test/e2e/testdata/routeadvertisements/frr-k8s/frrconf.yaml.tmpl @@ -0,0 +1,46 @@ +{{- define "frrconf.yaml" -}} +apiVersion: frrk8s.metallb.io/v1beta1 +kind: FRRConfiguration +metadata: + name: {{ .Name }} +{{- if .Labels }} + labels: +{{- range $k, $v := .Labels }} + {{ $k }}: {{ $v }} +{{- end }} +{{- end }} +spec: + bgp: + routers: +{{- range $v := .Routers }} + - asn: 64512 +{{- if .VRF }} + vrf: {{ .VRF }} +{{- end }} + neighbors: +{{- range .NeighborsIPv4 }} + - address: {{ . }} + asn: 64512 + disableMP: true + toReceive: + allowed: + mode: filtered + prefixes: +{{- range $v.NetworksIPv4 }} + - prefix: {{ . }} +{{- end }} +{{- end }} +{{- range .NeighborsIPv6 }} + - address: {{ . }} + asn: 64512 + disableMP: true + toReceive: + allowed: + mode: filtered + prefixes: +{{- range $v.NetworksIPv6 }} + - prefix: {{ . 
}} +{{- end }} +{{- end }} +{{- end }} +{{ end }} diff --git a/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl b/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl new file mode 100644 index 0000000000..5434bdf418 --- /dev/null +++ b/test/e2e/testdata/routeadvertisements/frr/daemons.tmpl @@ -0,0 +1,82 @@ +{{- define "daemons" -}} +# This file tells the frr package which daemons to start. +# +# Sample configurations for these daemons can be found in +# /usr/share/doc/frr/examples/. +# +# ATTENTION: +# +# When activating a daemon for the first time, a config file, even if it is +# empty, has to be present *and* be owned by the user and group "frr", else +# the daemon will not be started by /etc/init.d/frr. The permissions should +# be u=rw,g=r,o=. +# When using "vtysh" such a config file is also needed. It should be owned by +# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. +# +# The watchfrr and zebra daemons are always started. +# +bgpd=yes +ospfd=no +ospf6d=no +ripd=no +ripngd=no +isisd=no +pimd=no +ldpd=no +nhrpd=no +eigrpd=no +babeld=no +sharpd=no +pbrd=no +bfdd=yes +fabricd=no +vrrpd=no + +# +# If this option is set the /etc/init.d/frr script automatically loads +# the config via "vtysh -b" when the servers are started. +# Check /etc/pam.d/frr if you intend to use "vtysh"! 
+# +vtysh_enable=yes +zebra_options=" -A 127.0.0.1 -s 90000000" +bgpd_options=" -A 127.0.0.1" +ospfd_options=" -A 127.0.0.1" +ospf6d_options=" -A ::1" +ripd_options=" -A 127.0.0.1" +ripngd_options=" -A ::1" +isisd_options=" -A 127.0.0.1" +pimd_options=" -A 127.0.0.1" +ldpd_options=" -A 127.0.0.1" +nhrpd_options=" -A 127.0.0.1" +eigrpd_options=" -A 127.0.0.1" +babeld_options=" -A 127.0.0.1" +sharpd_options=" -A 127.0.0.1" +pbrd_options=" -A 127.0.0.1" +staticd_options="-A 127.0.0.1" +bfdd_options=" -A 127.0.0.1" +fabricd_options="-A 127.0.0.1" +vrrpd_options=" -A 127.0.0.1" + +# configuration profile +# +#frr_profile="traditional" +#frr_profile="datacenter" + +# +# This is the maximum number of FD's that will be available. +# Upon startup this is read by the control files and ulimit +# is called. Uncomment and use a reasonable value for your +# setup if you are expecting a large number of peers in +# say BGP. +#MAX_FDS=1024 + +# The list of daemons to watch is automatically generated by the init script. +#watchfrr_options="" + +# for debugging purposes, you can specify a "wrap" command to start instead +# of starting the daemon directly, e.g. to use valgrind on ospfd: +# ospfd_wrap="/usr/bin/valgrind" +# or you can use "all_wrap" for all daemons, e.g. to use perf record: +# all_wrap="/usr/bin/perf record --call-graph -" +# the normal daemon command is added to this at the end. 
+{{ end }} diff --git a/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl b/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl new file mode 100644 index 0000000000..a1beeab410 --- /dev/null +++ b/test/e2e/testdata/routeadvertisements/frr/frr.conf.tmpl @@ -0,0 +1,57 @@ +{{- define "frr.conf" -}} +debug zebra events +debug zebra nht detailed +debug zebra kernel +debug zebra rib detail +debug zebra nexthop detail +debug bgp keepalives +debug bgp neighbor-events +debug bgp nht +debug bgp updates +debug bgp zebra +log stdout debugging +log syslog debugging +log file /etc/frr/frr.log debugging +{{ range .Routers -}} +router bgp 64512 {{ if .VRF }}vrf {{ .VRF }}{{ end }} + no bgp default ipv4-unicast + no bgp default ipv6-unicast + no bgp network import-check +{{- range .NeighborsIPv4 }} + neighbor {{ . }} remote-as 64512 + # zebra has been observed to fail to start for unknown reasons, + # reduce timers to try to minimize delay impact on tests + neighbor {{ . }} timers connect 10 + neighbor {{ . }} timers 15 5 +{{- end }} +{{- range .NeighborsIPv6 }} + neighbor {{ . }} remote-as 64512 + neighbor {{ . }} timers connect 10 + neighbor {{ . }} timers 15 5 +{{- end }} +{{- if .NeighborsIPv4 }} + address-family ipv4 unicast +{{- range .NeighborsIPv4 }} + neighbor {{ . }} route-reflector-client + neighbor {{ . }} activate + neighbor {{ . }} next-hop-self +{{- end }} +{{- range .NetworksIPv4 }} + network {{ . }} +{{- end }} + exit-address-family +{{- end }} +{{- if .NeighborsIPv6 }} + address-family ipv6 unicast +{{- range .NeighborsIPv6 }} + neighbor {{ . }} route-reflector-client + neighbor {{ . }} activate + neighbor {{ . }} next-hop-self +{{- end }} +{{- range .NetworksIPv6 }} + network {{ . 
}} +{{- end }} + exit-address-family +{{- end }} +{{ end }} +{{ end }} diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go similarity index 94% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go index e1ce9e8c70..d7e3590ffd 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-mtu.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-mtu.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidMTU = []testdata.ValidateCRScenario{ +var LocalnetInvalidMTU = []testscenario.ValidateCRScenario{ { Description: "invalid MTU - higher than 65536", ExpectedErr: `spec.network.localnet.mtu in body should be less than or equal to 65536`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go similarity index 97% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go index 83c6664804..171678c9ca 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-phynetname.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-phynetname.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidPhyNetName = []testdata.ValidateCRScenario{ +var LocalnetInvalidPhyNetName = []testscenario.ValidateCRScenario{ { Description: "unset PhysicalNetworkName", ExpectedErr: `spec.network.localnet.physicalNetworkName: Required value`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go similarity 
index 87% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go index fad452da04..443f78970a 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-role.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-role.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidRole = []testdata.ValidateCRScenario{ +var LocalnetInvalidRole = []testscenario.ValidateCRScenario{ { Description: "role unset", ExpectedErr: `spec.network.localnet.role: Required value`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go similarity index 98% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go index d62a216d48..bd854acdb2 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-localnet-subnets.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-subnets.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidSubnets = []testdata.ValidateCRScenario{ +var LocalnetInvalidSubnets = []testscenario.ValidateCRScenario{ { Description: "unset subnets, and ipam.mode is unset", ExpectedErr: `Subnets is required with ipam.mode is Enabled or unset, and forbidden otherwise`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go similarity index 95% rename from test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go rename to test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go index daa393acdb..8ab71ca8dc 100644 --- 
a/test/e2e/testdata/cudn/invalid-scenarios-localnet-vlan.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-localnet-vlan.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetInvalidVLAN = []testdata.ValidateCRScenario{ +var LocalnetInvalidVLAN = []testscenario.ValidateCRScenario{ { Description: "invalid VLAN - invalid mode", ExpectedErr: `spec.network.localnet.vlan.mode: Unsupported value: "Disabled": supported values: "Access`, diff --git a/test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go b/test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go similarity index 95% rename from test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go rename to test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go index 80551a94cd..ddad69d54e 100644 --- a/test/e2e/testdata/cudn/invalid-scenarios-mismatch-topo-conf.go +++ b/test/e2e/testscenario/cudn/invalid-scenarios-mismatch-topo-conf.go @@ -1,8 +1,8 @@ package cudn -import "github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var MismatchTopologyConfig = []testdata.ValidateCRScenario{ +var MismatchTopologyConfig = []testscenario.ValidateCRScenario{ { Description: "topology is localnet but topology config is layer2", ExpectedErr: `spec.localnet is required when topology is Localnet and forbidden otherwise`, diff --git a/test/e2e/testdata/cudn/valid-scenarios-localnet.go b/test/e2e/testscenario/cudn/valid-scenarios-localnet.go similarity index 93% rename from test/e2e/testdata/cudn/valid-scenarios-localnet.go rename to test/e2e/testscenario/cudn/valid-scenarios-localnet.go index a5b188bbfd..d2c7b24d78 100644 --- a/test/e2e/testdata/cudn/valid-scenarios-localnet.go +++ b/test/e2e/testscenario/cudn/valid-scenarios-localnet.go @@ -1,8 +1,8 @@ package cudn -import 
"github.com/ovn-org/ovn-kubernetes/test/e2e/testdata" +import "github.com/ovn-org/ovn-kubernetes/test/e2e/testscenario" -var LocalnetValid = []testdata.ValidateCRScenario{ +var LocalnetValid = []testscenario.ValidateCRScenario{ { Description: "should create localnet topology successfully - minimal", Manifest: ` diff --git a/test/e2e/testdata/scenario.go b/test/e2e/testscenario/scenario.go similarity index 90% rename from test/e2e/testdata/scenario.go rename to test/e2e/testscenario/scenario.go index db96d3b50b..4ee247fd98 100644 --- a/test/e2e/testdata/scenario.go +++ b/test/e2e/testscenario/scenario.go @@ -1,4 +1,4 @@ -package testdata +package testscenario // ValidateCRScenario represent test scenario where a manifest is applied and failed with the expected error type ValidateCRScenario struct { diff --git a/test/e2e/unidling.go b/test/e2e/unidling.go index 9566b3190f..9f7535a9b2 100644 --- a/test/e2e/unidling.go +++ b/test/e2e/unidling.go @@ -14,6 +14,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/ovn-org/ovn-kubernetes/test/e2e/feature" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,7 +38,7 @@ const ( // Validate that Services with the well-known annotation k8s.ovn.org/idled-at // generate a NeedPods Event if the service doesn´t have endpoints and // OVN EmptyLB-Backends feature is enabled -var _ = ginkgo.Describe("Unidling", func() { +var _ = ginkgo.Describe("Unidling", feature.Unidle, func() { const ( serviceName = "empty-service" diff --git a/test/e2e/util.go b/test/e2e/util.go index aba6dcbc44..d03559e79e 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -7,9 +7,11 @@ import ( "math/rand" "net" "os" + "path/filepath" "regexp" "strconv" "strings" + "text/template" "time" "github.com/onsi/ginkgo/v2" @@ -30,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - clientset "k8s.io/client-go/kubernetes" 
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/debug" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" @@ -167,7 +168,7 @@ func newAgnhostPodOnNode(name, nodeName string, labels map[string]string, comman } // IsIPv6Cluster returns true if the kubernetes default service is IPv6 -func IsIPv6Cluster(c clientset.Interface) bool { +func IsIPv6Cluster(c kubernetes.Interface) bool { // Get the ClusterIP of the kubernetes service created in the default namespace svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.Background(), "kubernetes", metav1.GetOptions{}) if err != nil { @@ -656,7 +657,7 @@ func waitClusterHealthy(f *framework.Framework, numControlPlanePods int, control // successfully rolled out following an update. // // If allowedNotReadyNodes is -1, this method returns immediately without waiting. -func waitForRollout(c clientset.Interface, ns string, resource string, allowedNotReadyNodes int32, timeout time.Duration) error { +func waitForRollout(c kubernetes.Interface, ns string, resource string, allowedNotReadyNodes int32, timeout time.Duration) error { if allowedNotReadyNodes == -1 { return nil } @@ -1129,23 +1130,46 @@ func randStr(n int) string { return string(b) } -func isIPv4Supported() bool { - val, present := os.LookupEnv("PLATFORM_IPV4_SUPPORT") - return present && val == "true" +func isCIDRIPFamilySupported(cs kubernetes.Interface, cidr string) bool { + ginkgo.GinkgoHelper() + gomega.Expect(cidr).To(gomega.ContainSubstring("/")) + isIPv6 := utilnet.IsIPv6CIDRString(cidr) + return (isIPv4Supported(cs) && !isIPv6) || (isIPv6Supported(cs) && isIPv6) } -func isIPv6Supported() bool { - val, present := os.LookupEnv("PLATFORM_IPV6_SUPPORT") - return present && val == "true" +func isIPv4Supported(cs kubernetes.Interface) bool { + v4, _ := getSupportedIPFamilies(cs) + return v4 } -func isInterconnectEnabled() bool { - val, present := os.LookupEnv("OVN_ENABLE_INTERCONNECT") - return present && val == "true" 
+func isIPv6Supported(cs kubernetes.Interface) bool { + _, v6 := getSupportedIPFamilies(cs) + return v6 +} + +func getSupportedIPFamilies(cs kubernetes.Interface) (bool, bool) { + n, err := e2enode.GetRandomReadySchedulableNode(context.TODO(), cs) + framework.ExpectNoError(err, "must fetch a Ready Node") + v4NodeAddrs := e2enode.GetAddressesByTypeAndFamily(n, v1.NodeInternalIP, v1.IPv4Protocol) + v6NodeAddrs := e2enode.GetAddressesByTypeAndFamily(n, v1.NodeInternalIP, v1.IPv6Protocol) + return len(v4NodeAddrs) > 0, len(v6NodeAddrs) > 0 +} + +func getSupportedIPFamiliesSlice(cs kubernetes.Interface) []utilnet.IPFamily { + v4, v6 := getSupportedIPFamilies(cs) + switch { + case v4 && v6: + return []utilnet.IPFamily{utilnet.IPv4, utilnet.IPv6} + case v4: + return []utilnet.IPFamily{utilnet.IPv4} + case v6: + return []utilnet.IPFamily{utilnet.IPv6} + } + return nil } -func isUDNHostIsolationDisabled() bool { - val, present := os.LookupEnv("DISABLE_UDN_HOST_ISOLATION") +func isInterconnectEnabled() bool { + val, present := os.LookupEnv("OVN_ENABLE_INTERCONNECT") return present && val == "true" } @@ -1217,7 +1241,7 @@ func routeToNode(nodeName string, ips []string, mtu int, add bool) error { cmd = []string{"ip", "-6"} } var err error - cmd = append(cmd, "route", ipOp, fmt.Sprintf("%s/%d", ip, mask), "dev", "breth0") + cmd = append(cmd, "route", ipOp, fmt.Sprintf("%s/%d", ip, mask), "dev", deploymentconfig.Get().ExternalBridgeName()) if mtu != 0 { cmd = append(cmd, "mtu", strconv.Itoa(mtu)) } @@ -1271,7 +1295,7 @@ func GetNodeIPv6LinkLocalAddressForEth0(nodeName string) (string, error) { // right-most match of the provided regex. Returns a map of subexpression name // to subexpression capture. A zero string name `""` maps to the full expression // capture. 
-func CaptureContainerOutput(ctx context.Context, c clientset.Interface, namespace, pod, container, regexpr string) (map[string]string, error) { +func CaptureContainerOutput(ctx context.Context, c kubernetes.Interface, namespace, pod, container, regexpr string) (map[string]string, error) { regex, err := regexp.Compile(regexpr) if err != nil { return nil, fmt.Errorf("failed to compile regexp %q: %w", regexpr, err) @@ -1342,9 +1366,62 @@ func matchIPv6StringFamily(ipStrings []string) (string, error) { return util.MatchIPStringFamily(true /*ipv6*/, ipStrings) } +func matchCIDRStringsByIPFamily(cidrs []string, families ...utilnet.IPFamily) []string { + var r []string + familySet := sets.New(families...) + for _, cidr := range cidrs { + if familySet.Has(utilnet.IPFamilyOfCIDRString(cidr)) { + r = append(r, cidr) + } + } + return r +} + +func splitCIDRStringsByIPFamily(cidrs []string) (ipv4 []string, ipv6 []string) { + for _, cidr := range cidrs { + switch { + case utilnet.IsIPv4CIDRString(cidr): + ipv4 = append(ipv4, cidr) + case utilnet.IsIPv6CIDRString(cidr): + ipv6 = append(ipv6, cidr) + } + } + return +} + +func splitIPStringsByIPFamily(ips []string) (ipv4 []string, ipv6 []string) { + for _, ip := range ips { + switch { + case utilnet.IsIPv4String(ip): + ipv4 = append(ipv4, ip) + case utilnet.IsIPv6String(ip): + ipv6 = append(ipv6, ip) + } + } + return +} + +func getFirstCIDROfFamily(family utilnet.IPFamily, ipnets []*net.IPNet) *net.IPNet { + for _, ipnet := range ipnets { + if utilnet.IPFamilyOfCIDR(ipnet) == family { + return ipnet + } + } + return nil +} + +func getFirstIPStringOfFamily(family utilnet.IPFamily, ips []string) string { + for _, ip := range ips { + if utilnet.IPFamilyOfString(ip) == family { + return ip + } + } + return "" +} + // This is a replacement for e2epod.DeletePodWithWait(), which does not handle pods that // may be automatically restarted (https://issues.k8s.io/126785) -func deletePodWithWait(ctx context.Context, c clientset.Interface, 
pod *v1.Pod) error { +func deletePodWithWait(ctx context.Context, c kubernetes.Interface, pod *v1.Pod) error { if pod == nil { return nil } @@ -1372,7 +1449,7 @@ func deletePodWithWait(ctx context.Context, c clientset.Interface, pod *v1.Pod) // This is a replacement for e2epod.DeletePodWithWaitByName(), which does not handle pods // that may be automatically restarted (https://issues.k8s.io/126785) -func deletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName, podNamespace string) error { +func deletePodWithWaitByName(ctx context.Context, c kubernetes.Interface, podName, podNamespace string) error { pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { @@ -1390,7 +1467,7 @@ func deletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName // This is an alternative version of e2epod.WaitForPodNotFoundInNamespace(), which takes // a UID as well. -func waitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, uid types.UID, timeout time.Duration) error { +func waitForPodNotFoundInNamespace(ctx context.Context, c kubernetes.Interface, podName, ns string, uid types.UID, timeout time.Duration) error { err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*v1.Pod, error) { pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -1424,3 +1501,18 @@ func getAgnHostHTTPPortBindFullCMD(port uint16) []string { func getAgnHostHTTPPortBindCMDArgs(port uint16) []string { return []string{"netexec", fmt.Sprintf("--http-port=%d", port)} } + +// executeFileTemplate executes `name` template from the provided `templates` +// using `data`as input and writes the results to `directory/name` +func executeFileTemplate(templates *template.Template, directory, name string, data any) error { + f, err := os.OpenFile(filepath.Join(directory, name), 
os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return err + } + defer f.Close() + err = templates.ExecuteTemplate(f, name, data) + if err != nil { + return err + } + return nil +} diff --git a/test/scripts/e2e-cp.sh b/test/scripts/e2e-cp.sh index cf9589e589..096debe8a6 100755 --- a/test/scripts/e2e-cp.sh +++ b/test/scripts/e2e-cp.sh @@ -33,147 +33,165 @@ queries to the hostNetworked server pod on another node shall work for UDP|\ ipv4 pod" SKIPPED_TESTS="" +skip() { + if [ "$SKIPPED_TESTS" != "" ]; then + SKIPPED_TESTS+="|" + fi + SKIPPED_TESTS+=$* +} + +SKIPPED_LABELED_TESTS="" +skip_label() { + if [ "$SKIPPED_LABELED_TESTS" != "" ]; then + SKIPPED_LABELED_TESTS+=" && " + fi + SKIPPED_LABELED_TESTS+="!($*)" +} if [ "$PLATFORM_IPV4_SUPPORT" == true ]; then - if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - # No support for these features in dual-stack yet - SKIPPED_TESTS="hybrid.overlay" - else - # Skip sflow in IPv4 since it's a long test (~5 minutes) - # We're validating netflow v5 with an ipv4 cluster, sflow with an ipv6 cluster - SKIPPED_TESTS="Should validate flow data of br-int is sent to an external gateway with sflow|ipv6 pod" - fi + if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then + # No support for these features in dual-stack yet + skip "hybrid.overlay" + else + # Skip sflow in IPv4 since it's a long test (~5 minutes) + # We're validating netflow v5 with an ipv4 cluster, sflow with an ipv6 cluster + skip "Should validate flow data of br-int is sent to an external gateway with sflow|ipv6 pod" + fi fi if [ "$PLATFORM_IPV4_SUPPORT" == false ]; then - SKIPPED_TESTS+="\[IPv4\]" + skip "\[IPv4\]" fi if [ "$OVN_HA" == false ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi # No support for these features in no-ha mode yet # TODO streamline the db delete tests - SKIPPED_TESTS+="recovering from deleting db files while maintaining connectivity|\ -Should validate connectivity before and after deleting all the db-pods at once in HA mode" + skip 
"recovering from deleting db files while maintaining connectivity" + skip "Should validate connectivity before and after deleting all the db-pods at once in HA mode" else - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - - SKIPPED_TESTS+="Should validate connectivity before and after deleting all the db-pods at once in Non-HA mode|\ - e2e br-int NetFlow export validation" + skip "Should validate connectivity before and after deleting all the db-pods at once in Non-HA mode" + skip "e2e br-int NetFlow export validation" fi if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi # No support for these tests in IPv6 mode yet - SKIPPED_TESTS+=$IPV6_SKIPPED_TESTS + skip $IPV6_SKIPPED_TESTS fi if [ "$OVN_DISABLE_SNAT_MULTIPLE_GWS" == false ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="e2e multiple external gateway stale conntrack entry deletion validation" + skip "e2e multiple external gateway stale conntrack entry deletion validation" fi if [ "$OVN_GATEWAY_MODE" == "shared" ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Should ensure load balancer service|LGW" # See https://github.com/ovn-org/ovn-kubernetes/issues/4138 for details + skip "Should ensure load balancer service|LGW" fi if [ "$OVN_GATEWAY_MODE" == "local" ]; then - # See https://github.com/ovn-org/ovn-kubernetes/labels/ci-ipv6 for details: + # See https://github.com/ovn-org/ovn-kubernetes/labels/ci-ipv6 for details if [ "$PLATFORM_IPV6_SUPPORT" == true ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Should be allowed by nodeport services|\ -Should successfully create then remove a static pod|\ -Should validate connectivity from a pod to a non-node host address on same node|\ -Should validate connectivity within a namespace of pods on separate nodes|\ -Services" + skip "Should be allowed by nodeport services" + 
skip "Should successfully create then remove a static pod" + skip "Should validate connectivity from a pod to a non-node host address on same node" + skip "Should validate connectivity within a namespace of pods on separate nodes" + skip "Services" fi fi # skipping the egress ip legacy health check test because it requires two # sequenced rollouts of both ovnkube-node and ovnkube-master that take a lot of # time. -SKIPPED_TESTS+="${SKIPPED_TESTS:+|}disabling egress nodes impeding Legacy health check" +skip "disabling egress nodes impeding Legacy health check" if [ "$ENABLE_MULTI_NET" != "true" ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Multi Homing" + skip "Multi Homing" fi if [ "$OVN_NETWORK_QOS_ENABLE" != "true" ]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="e2e NetworkQoS validation" + skip "e2e NetworkQoS validation" fi # Only run Node IP/MAC address migration tests if they are explicitly requested IP_MIGRATION_TESTS="Node IP and MAC address migration" if [[ "${WHAT}" != "${IP_MIGRATION_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Node IP and MAC address migration" + skip "Node IP and MAC address migration" fi # Only run Multi node zones interconnect tests if they are explicitly requested MULTI_NODE_ZONES_TESTS="Multi node zones interconnect" if [[ "${WHAT}" != "${MULTI_NODE_ZONES_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="Multi node zones interconnect" + skip "Multi node zones interconnect" fi # Only run external gateway tests if they are explicitly requested EXTERNAL_GATEWAY_TESTS="External Gateway" if [[ "${WHAT}" != "${EXTERNAL_GATEWAY_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+="External Gateway" + skip "External Gateway" fi # Only run kubevirt virtual machines tests if they are explicitly requested 
KV_LIVE_MIGRATION_TESTS="Kubevirt Virtual Machines" if [[ "${WHAT}" != "${KV_LIVE_MIGRATION_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+=$KV_LIVE_MIGRATION_TESTS + skip $KV_LIVE_MIGRATION_TESTS fi # Only run network segmentation tests if they are explicitly requested NETWORK_SEGMENTATION_TESTS="Network Segmentation" if [[ "${WHAT}" != "${NETWORK_SEGMENTATION_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" - fi - SKIPPED_TESTS+=$NETWORK_SEGMENTATION_TESTS + skip $NETWORK_SEGMENTATION_TESTS fi -# Only run bgp tests if they are explicitly requested BGP_TESTS="BGP" -if [[ "${WHAT}" != "${BGP_TESTS}"* ]]; then - if [ "$SKIPPED_TESTS" != "" ]; then - SKIPPED_TESTS+="|" +if [ "$ENABLE_ROUTE_ADVERTISEMENTS" != true ]; then + skip $BGP_TESTS +else + if [ "$ADVERTISE_DEFAULT_NETWORK" = true ]; then + # Filter out extended RouteAdvertisements tests to keep job run time down + if [ "$ENABLE_NETWORK_SEGMENTATION" = true ]; then + skip_label "Feature:RouteAdvertisements && EXTENDED" + fi + + # Some test don't work when the default network is advertised, either because + # the configuration that the test excercises does not make sense for an advertised network, or + # there is some bug or functional gap + # call out case by case + + # pod reached from default network through secondary interface, asymetric, configuration does not make sense + # TODO: perhaps the secondary network attached pods should not be attached to default network + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on the same node" + skip "Multi Homing A single pod with an OVN-K secondary network attached to a localnet network mapped to external primary interface bridge can be reached by a client pod in the default network on a different node" + + # these tests require metallb but the 
configuration we do for it is not compatible with the configuration we do to advertise the default network + # TODO: consolidate configuration + skip "Load Balancer Service Tests with MetalLB" + skip "EgressService" + + # tests that specifically expect the node SNAT to happen + # TODO: expect the pod IP where it makes sense + skip "e2e egress firewall policy validation with external containers" + skip "e2e egress IP validation Cluster Default Network \[OVN network\] Using different methods to disable a node's availability for egress Should validate the egress IP functionality against remote hosts" + skip "e2e egress IP validation Cluster Default Network \[OVN network\] Should validate the egress IP SNAT functionality against host-networked pods" + skip "e2e egress IP validation Cluster Default Network Should validate egress IP logic when one pod is managed by more than one egressIP object" + skip "e2e egress IP validation Cluster Default Network Should re-assign egress IPs when node readiness / reachability goes down/up" + skip "Pod to external server PMTUD when a client ovnk pod targeting an external server is created when tests are run towards the agnhost echo server queries to the hostNetworked server pod on another node shall work for UDP" + + # https://issues.redhat.com/browse/OCPBUGS-55028 + skip "e2e egress IP validation Cluster Default Network \[secondary-host-eip\]" + + # https://issues.redhat.com/browse/OCPBUGS-50636 + skip "Services of type NodePort should listen on each host addresses" + skip "Services of type NodePort should work on secondary node interfaces for ETP=local and ETP=cluster when backend pods are also served by EgressIP" + + # https://github.com/ovn-kubernetes/ovn-kubernetes/issues/5240 + skip "e2e control plane test node readiness according to its defaults interface MTU size should get node not ready with a too small MTU" + + # buggy tests that don't work in dual stack mode + skip "Service Hairpin SNAT Should ensure service hairpin 
traffic is NOT SNATed to hairpin masquerade IP; GR LB" + skip "Services when a nodePort service targeting a pod with hostNetwork:false is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for TCP" + skip "Services when a nodePort service targeting a pod with hostNetwork:true is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for TCP" + skip "Services when a nodePort service targeting a pod with hostNetwork:false is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for UDP" + skip "Services when a nodePort service targeting a pod with hostNetwork:true is created when tests are run towards the agnhost echo service queries to the nodePort service shall work for UDP" fi - SKIPPED_TESTS+=$BGP_TESTS fi # setting these is required to make RuntimeClass tests work ... :/ @@ -182,17 +200,23 @@ export KUBE_CONTAINER_RUNTIME_ENDPOINT=unix:///run/containerd/containerd.sock export KUBE_CONTAINER_RUNTIME_NAME=containerd export NUM_NODES=2 -FOCUS=$(echo ${@:1} | sed 's/ /\\s/g') +FOCUS=$(echo "${@:1}" | sed 's/ /\\s/g') + +# Ginkgo test timeout needs to be lower than both github's timeout and go test +# timeout to be able to get proper Ginkgo output when it happens. +TEST_TIMEOUT=${TEST_TIMEOUT:-180} +GO_TEST_TIMEOUT=$((TEST_TIMEOUT + 5)) pushd e2e go mod download -go test -test.timeout 180m -v . \ +go test -test.timeout ${GO_TEST_TIMEOUT}m -v . 
\ -ginkgo.v \ -ginkgo.focus ${FOCUS:-.} \ - -ginkgo.timeout 3h \ + -ginkgo.timeout ${TEST_TIMEOUT}m \ -ginkgo.flake-attempts ${FLAKE_ATTEMPTS:-2} \ -ginkgo.skip="${SKIPPED_TESTS}" \ + ${SKIPPED_LABELED_TESTS:+-ginkgo.label-filter="${SKIPPED_LABELED_TESTS}"} \ -ginkgo.junit-report=${E2E_REPORT_DIR}/junit_${E2E_REPORT_PREFIX}report.xml \ -provider skeleton \ -kubeconfig ${KUBECONFIG} \ diff --git a/test/scripts/e2e-kind.sh b/test/scripts/e2e-kind.sh index 2ec08b59ff..1cab2fc05d 100755 --- a/test/scripts/e2e-kind.sh +++ b/test/scripts/e2e-kind.sh @@ -200,7 +200,7 @@ fi # timeout needs to be lower than github's timeout. Otherwise github terminates # the job and doesn't give ginkgo a chance to print status so that we know why # the timeout happened. -TEST_TIMEOUT=${TEST_TIMEOUT:-100m} +TEST_TIMEOUT=${TEST_TIMEOUT:-120m} ginkgo --nodes=${NUM_NODES} \ --focus=${FOCUS} \ diff --git a/test/scripts/install-kind.sh b/test/scripts/install-kind.sh index d7674159e1..1b41646c7e 100755 --- a/test/scripts/install-kind.sh +++ b/test/scripts/install-kind.sh @@ -78,8 +78,5 @@ else ./kind.sh fi -if [ "$KIND_INSTALL_KUBEVIRT" == true ]; then - sudo mv ./bin/virtctl /usr/local/bin/virtctl -fi popd # go our of $SCRIPT_DIR/../../contrib